nmethod.cpp revision 7081:39231c6e51fe
/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/dependencies.hpp"
#include "code/nmethod.hpp"
#include "code/scopeDesc.hpp"
#include "compiler/abstractCompiler.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compileLog.hpp"
#include "compiler/compilerOracle.hpp"
#include "compiler/disassembler.hpp"
#include "interpreter/bytecode.hpp"
#include "oops/methodData.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "prims/jvmtiImpl.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/sweeper.hpp"
#include "utilities/resourceHash.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/xmlstream.hpp"
#ifdef SHARK
#include "shark/sharkCompiler.hpp"
#endif

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

unsigned char nmethod::_global_unloading_clock = 0;

#ifdef DTRACE_ENABLED

// Only bother with this argument setup if dtrace is available

#define DTRACE_METHOD_UNLOAD_PROBE(method)                                \
  {                                                                       \
    Method* m = (method);                                                 \
    if (m != NULL) {                                                      \
      Symbol* klass_name = m->klass_name();                               \
      Symbol* name = m->name();                                           \
      Symbol* signature = m->signature();                                 \
      HOTSPOT_COMPILED_METHOD_UNLOAD(                                     \
        (char *) klass_name->bytes(), klass_name->utf8_length(),          \
        (char *) name->bytes(), name->utf8_length(),                      \
        (char *) signature->bytes(), signature->utf8_length());           \
    }                                                                     \
  }

#else //  ndef DTRACE_ENABLED

#define DTRACE_METHOD_UNLOAD_PROBE(method)

#endif

bool nmethod::is_compiled_by_c1() const {
  if (compiler() == NULL) {
    return false;
  }
  return compiler()->is_c1();
}
bool nmethod::is_compiled_by_c2() const {
  if (compiler() == NULL) {
    return false;
  }
  return compiler()->is_c2();
}
bool nmethod::is_compiled_by_shark() const {
  if (compiler() == NULL) {
    return false;
  }
  return compiler()->is_shark();
}



//---------------------------------------------------------------------------------
// NMethod statistics
// They are printed under various flags, including:
//   PrintC1Statistics, PrintOptoStatistics, LogVMOutput, and LogCompilation.
// (In the latter two cases, they, like other stats, are printed to the log only.)

#ifndef PRODUCT
// These variables are put into one block to reduce relocations
// and make it simpler to print from the debugger.
static
struct nmethod_stats_struct {
  int nmethod_count;
  int total_size;
  int relocation_size;
  int consts_size;
  int insts_size;
  int stub_size;
  int scopes_data_size;
  int scopes_pcs_size;
  int dependencies_size;
  int handler_table_size;
  int nul_chk_table_size;
  int oops_size;

  void note_nmethod(nmethod* nm) {
    nmethod_count += 1;
    total_size          += nm->size();
    relocation_size     += nm->relocation_size();
    consts_size         += nm->consts_size();
    insts_size          += nm->insts_size();
    stub_size           += nm->stub_size();
    oops_size           += nm->oops_size();
    scopes_data_size    += nm->scopes_data_size();
    scopes_pcs_size     += nm->scopes_pcs_size();
    dependencies_size   += nm->dependencies_size();
    handler_table_size  += nm->handler_table_size();
    nul_chk_table_size  += nm->nul_chk_table_size();
  }
  void print_nmethod_stats() {
    if (nmethod_count == 0)  return;
    tty->print_cr("Statistics for %d bytecoded nmethods:", nmethod_count);
    if (total_size != 0)          tty->print_cr(" total in heap  = %d", total_size);
    if (relocation_size != 0)     tty->print_cr(" relocation     = %d", relocation_size);
    if (consts_size != 0)         tty->print_cr(" constants      = %d", consts_size);
    if (insts_size != 0)          tty->print_cr(" main code      = %d", insts_size);
    if (stub_size != 0)           tty->print_cr(" stub code      = %d", stub_size);
    if (oops_size != 0)           tty->print_cr(" oops           = %d", oops_size);
    if (scopes_data_size != 0)    tty->print_cr(" scopes data    = %d", scopes_data_size);
    if (scopes_pcs_size != 0)     tty->print_cr(" scopes pcs     = %d", scopes_pcs_size);
    if (dependencies_size != 0)   tty->print_cr(" dependencies   = %d", dependencies_size);
    if (handler_table_size != 0)  tty->print_cr(" handler table  = %d", handler_table_size);
    if (nul_chk_table_size != 0)  tty->print_cr(" nul chk table  = %d", nul_chk_table_size);
  }

  int native_nmethod_count;
  int native_total_size;
  int native_relocation_size;
  int native_insts_size;
  int native_oops_size;
  void note_native_nmethod(nmethod* nm) {
    native_nmethod_count += 1;
    native_total_size       += nm->size();
    native_relocation_size  += nm->relocation_size();
    native_insts_size       += nm->insts_size();
    native_oops_size        += nm->oops_size();
  }
  void print_native_nmethod_stats() {
    if (native_nmethod_count == 0)  return;
    tty->print_cr("Statistics for %d native nmethods:", native_nmethod_count);
    if (native_total_size != 0)       tty->print_cr(" N. total size  = %d", native_total_size);
    if (native_relocation_size != 0)  tty->print_cr(" N. relocation  = %d", native_relocation_size);
    if (native_insts_size != 0)       tty->print_cr(" N. main code   = %d", native_insts_size);
    if (native_oops_size != 0)        tty->print_cr(" N. oops        = %d", native_oops_size);
  }

  int pc_desc_resets;   // number of resets (= number of caches)
  int pc_desc_queries;  // queries to nmethod::find_pc_desc
  int pc_desc_approx;   // number of those which have approximate true
  int pc_desc_repeats;  // number of _pc_descs[0] hits
  int pc_desc_hits;     // number of LRU cache hits
  int pc_desc_tests;    // total number of PcDesc examinations
  int pc_desc_searches; // total number of quasi-binary search steps
  int pc_desc_adds;     // number of LRU cache insertions

  void print_pc_stats() {
    tty->print_cr("PcDesc Statistics:  %d queries, %.2f comparisons per query",
                  pc_desc_queries,
                  (double)(pc_desc_tests + pc_desc_searches)
                  / pc_desc_queries);
    tty->print_cr("  caches=%d queries=%d/%d, hits=%d+%d, tests=%d+%d, adds=%d",
                  pc_desc_resets,
                  pc_desc_queries, pc_desc_approx,
                  pc_desc_repeats, pc_desc_hits,
                  pc_desc_tests, pc_desc_searches, pc_desc_adds);
  }
} nmethod_stats;
#endif //PRODUCT


//---------------------------------------------------------------------------------


ExceptionCache::ExceptionCache(Handle exception, address pc, address handler) {
  assert(pc != NULL, "Must be non null");
  assert(exception.not_null(), "Must be non null");
  assert(handler != NULL, "Must be non null");

  _count = 0;
  _exception_type = exception->klass();
  _next = NULL;

  add_address_and_handler(pc,handler);
}


address ExceptionCache::match(Handle exception, address pc) {
  assert(pc != NULL,"Must be non null");
  assert(exception.not_null(),"Must be non null");
  if (exception->klass() == exception_type()) {
    return (test_address(pc));
  }

  return NULL;
}


bool ExceptionCache::match_exception_with_space(Handle exception) {
  assert(exception.not_null(),"Must be non null");
  if (exception->klass() == exception_type() && count() < cache_size) {
    return true;
  }
  return false;
}


address ExceptionCache::test_address(address addr) {
  for (int i=0; i<count(); i++) {
    if (pc_at(i) == addr) {
      return handler_at(i);
    }
  }
  return NULL;
}


bool ExceptionCache::add_address_and_handler(address addr, address handler) {
  if (test_address(addr) == handler) return true;
  if (count() < cache_size) {
    set_pc_at(count(),addr);
    set_handler_at(count(), handler);
    increment_count();
    return true;
  }
  return false;
}


// Private method used to manipulate the exception cache directly.
ExceptionCache* nmethod::exception_cache_entry_for_exception(Handle exception) {
  ExceptionCache* ec = exception_cache();
  while (ec != NULL) {
    if (ec->match_exception_with_space(exception)) {
      return ec;
    }
    ec = ec->next();
  }
  return NULL;
}


//-----------------------------------------------------------------------------


// Helper used by both find_pc_desc methods.
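// An exact match requires pc->pc_offset() == pc_offset; an approximate match
// accepts the first PcDesc whose pc_offset is at or after the query (the
// PcDesc array is sorted and begins with a negative sentinel, so pc-1 is valid).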
static inline bool match_desc(PcDesc* pc, int pc_offset, bool approximate) {
  NOT_PRODUCT(++nmethod_stats.pc_desc_tests);
  if (!approximate)
    return pc->pc_offset() == pc_offset;
  else
    return (pc-1)->pc_offset() < pc_offset && pc_offset <= pc->pc_offset();
}

void PcDescCache::reset_to(PcDesc* initial_pc_desc) {
  if (initial_pc_desc == NULL) {
    _pc_descs[0] = NULL; // native method; no PcDescs at all
    return;
  }
  NOT_PRODUCT(++nmethod_stats.pc_desc_resets);
  // reset the cache by filling it with benign (non-null) values
  assert(initial_pc_desc->pc_offset() < 0, "must be sentinel");
  for (int i = 0; i < cache_size; i++)
    _pc_descs[i] = initial_pc_desc;
}

PcDesc* PcDescCache::find_pc_desc(int pc_offset, bool approximate) {
  NOT_PRODUCT(++nmethod_stats.pc_desc_queries);
  NOT_PRODUCT(if (approximate) ++nmethod_stats.pc_desc_approx);

  // Note: one might think that caching the most recently
  // read value separately would be a win, but one would be
  // wrong.  When many threads are updating it, the cache
  // line it's in would bounce between caches, negating
  // any benefit.

  // In order to prevent race conditions do not load cache elements
  // repeatedly, but use a local copy:
  PcDesc* res;

  // Step one:  Check the most recently added value.
  res = _pc_descs[0];
  if (res == NULL) return NULL;  // native method; no PcDescs at all
  if (match_desc(res, pc_offset, approximate)) {
    NOT_PRODUCT(++nmethod_stats.pc_desc_repeats);
    return res;
  }

  // Step two:  Check the rest of the LRU cache.
  for (int i = 1; i < cache_size; ++i) {
    res = _pc_descs[i];
    if (res->pc_offset() < 0) break;  // optimization: skip empty cache
    if (match_desc(res, pc_offset, approximate)) {
      NOT_PRODUCT(++nmethod_stats.pc_desc_hits);
      return res;
    }
  }

  // Report failure.
  return NULL;
}

void PcDescCache::add_pc_desc(PcDesc* pc_desc) {
  NOT_PRODUCT(++nmethod_stats.pc_desc_adds);
  // Update the LRU cache by shifting pc_desc forward.
  for (int i = 0; i < cache_size; i++)  {
    PcDesc* next = _pc_descs[i];
    _pc_descs[i] = pc_desc;
    pc_desc = next;
  }
}

// adjust pcs_size so that it is a multiple of both oopSize and
// sizeof(PcDesc) (assumes that if sizeof(PcDesc) is not a multiple
// of oopSize, then 2*sizeof(PcDesc) is)
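// For illustration (actual sizes are platform-dependent): with oopSize == 8
// and sizeof(PcDesc) == 12, pcs_size == 36 rounds up to 40, which is not a
// multiple of 12, so we return 36 + 12 == 48, a multiple of both.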
static int adjust_pcs_size(int pcs_size) {
  int nsize = round_to(pcs_size,   oopSize);
  if ((nsize % sizeof(PcDesc)) != 0) {
    nsize = pcs_size + sizeof(PcDesc);
  }
  assert((nsize % oopSize) == 0, "correct alignment");
  return nsize;
}

//-----------------------------------------------------------------------------


void nmethod::add_exception_cache_entry(ExceptionCache* new_entry) {
  assert(ExceptionCache_lock->owned_by_self(),"Must hold the ExceptionCache_lock");
  assert(new_entry != NULL,"Must be non null");
  assert(new_entry->next() == NULL, "Must be null");

  if (exception_cache() != NULL) {
    new_entry->set_next(exception_cache());
  }
  set_exception_cache(new_entry);
}

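// Unlink and delete any cache entries whose exception class has been
// unloaded, i.e., whose class loader is no longer alive.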
void nmethod::clean_exception_cache(BoolObjectClosure* is_alive) {
  ExceptionCache* prev = NULL;
  ExceptionCache* curr = exception_cache();

  while (curr != NULL) {
    ExceptionCache* next = curr->next();

    Klass* ex_klass = curr->exception_type();
    if (ex_klass != NULL && !ex_klass->is_loader_alive(is_alive)) {
      if (prev == NULL) {
        set_exception_cache(next);
      } else {
        prev->set_next(next);
      }
      delete curr;
      // prev stays the same.
    } else {
      prev = curr;
    }

    curr = next;
  }
}

// Public method for accessing the exception cache.
address nmethod::handler_for_exception_and_pc(Handle exception, address pc) {
  // We never grab a lock to read the exception cache, so we may
  // have false negatives. This is okay, as it can only happen during
  // the first few exception lookups for a given nmethod.
  ExceptionCache* ec = exception_cache();
  while (ec != NULL) {
    address ret_val;
    if ((ret_val = ec->match(exception,pc)) != NULL) {
      return ret_val;
    }
    ec = ec->next();
  }
  return NULL;
}


void nmethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
  // There are potential race conditions during exception cache updates, so we
  // must own the ExceptionCache_lock before doing ANY modifications. Because
  // we don't lock during reads, it is possible to have several threads attempt
  // to update the cache with the same data. We need to check for already inserted
  // copies of the current data before adding it.

  MutexLocker ml(ExceptionCache_lock);
  ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);

  if (target_entry == NULL || !target_entry->add_address_and_handler(pc,handler)) {
    target_entry = new ExceptionCache(exception,pc,handler);
    add_exception_cache_entry(target_entry);
  }
}


//-------------end of code for ExceptionCache--------------


int nmethod::total_size() const {
  return
    consts_size()        +
    insts_size()         +
    stub_size()          +
    scopes_data_size()   +
    scopes_pcs_size()    +
    handler_table_size() +
    nul_chk_table_size();
}

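// Returns a short kind tag for logging: "osr" for on-stack-replacement
// methods, "c2n" for native method wrappers, or NULL for ordinary nmethods.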
const char* nmethod::compile_kind() const {
  if (is_osr_method())     return "osr";
  if (method() != NULL && is_native_method())  return "c2n";
  return NULL;
}

// Fill in default values for various flag fields
void nmethod::init_defaults() {
  _state                      = in_use;
  _unloading_clock            = 0;
  _marked_for_reclamation     = 0;
  _has_flushed_dependencies   = 0;
  _has_unsafe_access          = 0;
  _has_method_handle_invokes  = 0;
  _lazy_critical_native       = 0;
  _has_wide_vectors           = 0;
  _marked_for_deoptimization  = 0;
  _lock_count                 = 0;
  _stack_traversal_mark       = 0;
  _unload_reported            = false;           // jvmti state

#ifdef ASSERT
  _oops_are_stale             = false;
#endif

  _oops_do_mark_link       = NULL;
  _jmethod_id              = NULL;
  _osr_link                = NULL;
  if (UseG1GC) {
    _unloading_next        = NULL;
  } else {
    _scavenge_root_link    = NULL;
  }
  _scavenge_root_state     = 0;
  _compiler                = NULL;
#if INCLUDE_RTM_OPT
  _rtm_state               = NoRTM;
#endif
#ifdef HAVE_DTRACE_H
  _trap_offset             = 0;
#endif // def HAVE_DTRACE_H
}

nmethod* nmethod::new_native_nmethod(methodHandle method,
  int compile_id,
  CodeBuffer *code_buffer,
  int vep_offset,
  int frame_complete,
  int frame_size,
  ByteSize basic_lock_owner_sp_offset,
  ByteSize basic_lock_sp_offset,
  OopMapSet* oop_maps) {
  code_buffer->finalize_oop_references(method);
  // create nmethod
  nmethod* nm = NULL;
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    int native_nmethod_size = allocation_size(code_buffer, sizeof(nmethod));
    CodeOffsets offsets;
    offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
    offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
    nm = new (native_nmethod_size, CompLevel_none) nmethod(method(), native_nmethod_size,
                                            compile_id, &offsets,
                                            code_buffer, frame_size,
                                            basic_lock_owner_sp_offset,
                                            basic_lock_sp_offset, oop_maps);
    NOT_PRODUCT(if (nm != NULL)  nmethod_stats.note_native_nmethod(nm));
    if (PrintAssembly && nm != NULL) {
      Disassembler::decode(nm);
    }
  }
  // verify nmethod
  debug_only(if (nm) nm->verify();) // might block

  if (nm != NULL) {
    nm->log_new_nmethod();
  }

  return nm;
}

#ifdef HAVE_DTRACE_H
nmethod* nmethod::new_dtrace_nmethod(methodHandle method,
                                     CodeBuffer *code_buffer,
                                     int vep_offset,
                                     int trap_offset,
                                     int frame_complete,
                                     int frame_size) {
  code_buffer->finalize_oop_references(method);
  // create nmethod
  nmethod* nm = NULL;
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    int nmethod_size = allocation_size(code_buffer, sizeof(nmethod));
    CodeOffsets offsets;
    offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
    offsets.set_value(CodeOffsets::Dtrace_trap, trap_offset);
    offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);

    nm = new (nmethod_size, CompLevel_none) nmethod(method(), nmethod_size,
                                    &offsets, code_buffer, frame_size);

    NOT_PRODUCT(if (nm != NULL)  nmethod_stats.note_nmethod(nm));
    if (PrintAssembly && nm != NULL) {
      Disassembler::decode(nm);
    }
  }
  // verify nmethod
  debug_only(if (nm) nm->verify();) // might block

  if (nm != NULL) {
    nm->log_new_nmethod();
  }

  return nm;
}

#endif // def HAVE_DTRACE_H

nmethod* nmethod::new_nmethod(methodHandle method,
  int compile_id,
  int entry_bci,
  CodeOffsets* offsets,
  int orig_pc_offset,
  DebugInformationRecorder* debug_info,
  Dependencies* dependencies,
  CodeBuffer* code_buffer, int frame_size,
  OopMapSet* oop_maps,
  ExceptionHandlerTable* handler_table,
  ImplicitExceptionTable* nul_chk_table,
  AbstractCompiler* compiler,
  int comp_level
)
{
  assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
  code_buffer->finalize_oop_references(method);
  // create nmethod
  nmethod* nm = NULL;
  { MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
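    // The allocation must cover the CodeBuffer contents (header, relocation
    // and code sections) plus the oopSize-aligned debug-info, dependency and
    // exception-table sections laid out by the nmethod constructor below.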
    int nmethod_size =
      allocation_size(code_buffer, sizeof(nmethod))
      + adjust_pcs_size(debug_info->pcs_size())
      + round_to(dependencies->size_in_bytes() , oopSize)
      + round_to(handler_table->size_in_bytes(), oopSize)
      + round_to(nul_chk_table->size_in_bytes(), oopSize)
      + round_to(debug_info->data_size()       , oopSize);

    nm = new (nmethod_size, comp_level)
    nmethod(method(), nmethod_size, compile_id, entry_bci, offsets,
            orig_pc_offset, debug_info, dependencies, code_buffer, frame_size,
            oop_maps,
            handler_table,
            nul_chk_table,
            compiler,
            comp_level);

    if (nm != NULL) {
      // To make dependency checking during class loading fast, record
      // the nmethod dependencies in the classes it is dependent on.
      // This allows the dependency checking code to simply walk the
      // class hierarchy above the loaded class, checking only nmethods
      // which are dependent on those classes.  The slow way is to
      // check every nmethod for dependencies which makes it linear in
      // the number of methods compiled.  For applications with a lot
      // of classes the slow way is too slow.
      for (Dependencies::DepStream deps(nm); deps.next(); ) {
        Klass* klass = deps.context_type();
        if (klass == NULL) {
          continue;  // ignore things like evol_method
        }

        // record this nmethod as dependent on this klass
        InstanceKlass::cast(klass)->add_dependent_nmethod(nm);
      }
      NOT_PRODUCT(nmethod_stats.note_nmethod(nm));
      if (PrintAssembly || CompilerOracle::has_option_string(method, "PrintAssembly")) {
        Disassembler::decode(nm);
      }
    }
  }
  // Do verification and logging outside CodeCache_lock.
  if (nm != NULL) {
    // Safepoints in nmethod::verify aren't allowed because nm hasn't been installed yet.
    DEBUG_ONLY(nm->verify();)
    nm->log_new_nmethod();
  }
  return nm;
}


// For native wrappers
nmethod::nmethod(
  Method* method,
  int nmethod_size,
  int compile_id,
  CodeOffsets* offsets,
  CodeBuffer* code_buffer,
  int frame_size,
  ByteSize basic_lock_owner_sp_offset,
  ByteSize basic_lock_sp_offset,
  OopMapSet* oop_maps )
  : CodeBlob("native nmethod", code_buffer, sizeof(nmethod),
             nmethod_size, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps),
  _native_receiver_sp_offset(basic_lock_owner_sp_offset),
  _native_basic_lock_sp_offset(basic_lock_sp_offset)
{
  {
    debug_only(No_Safepoint_Verifier nsv;)
    assert_locked_or_safepoint(CodeCache_lock);

    init_defaults();
    _method                  = method;
    _entry_bci               = InvocationEntryBci;
    // We have no exception handler or deopt handler, so make the
    // values something that will never match a pc, like the nmethod vtable entry
    _exception_offset        = 0;
    _deoptimize_offset       = 0;
    _deoptimize_mh_offset    = 0;
    _orig_pc_offset          = 0;

    _consts_offset           = data_offset();
    _stub_offset             = data_offset();
    _oops_offset             = data_offset();
    _metadata_offset         = _oops_offset         + round_to(code_buffer->total_oop_size(), oopSize);
    _scopes_data_offset      = _metadata_offset     + round_to(code_buffer->total_metadata_size(), wordSize);
    _scopes_pcs_offset       = _scopes_data_offset;
    _dependencies_offset     = _scopes_pcs_offset;
    _handler_table_offset    = _dependencies_offset;
    _nul_chk_table_offset    = _handler_table_offset;
    _nmethod_end_offset      = _nul_chk_table_offset;
    _compile_id              = compile_id;
    _comp_level              = CompLevel_none;
    _entry_point             = code_begin()          + offsets->value(CodeOffsets::Entry);
    _verified_entry_point    = code_begin()          + offsets->value(CodeOffsets::Verified_Entry);
    _osr_entry_point         = NULL;
    _exception_cache         = NULL;
    _pc_desc_cache.reset_to(NULL);
    _hotness_counter         = NMethodSweeper::hotness_counter_reset_val();

    code_buffer->copy_values_to(this);
    if (ScavengeRootsInCode) {
      if (detect_scavenge_root_oops()) {
        CodeCache::add_scavenge_root_nmethod(this);
      }
      Universe::heap()->register_nmethod(this);
    }
    debug_only(verify_scavenge_root_oops());
    CodeCache::commit(this);
  }

  if (PrintNativeNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) {
    ttyLocker ttyl;  // keep the following output all in one block
    // This output goes directly to the tty, not the compiler log.
    // To enable tools to match it up with the compilation activity,
    // be sure to tag this tty output with the compile ID.
    if (xtty != NULL) {
      xtty->begin_head("print_native_nmethod");
      xtty->method(_method);
      xtty->stamp();
      xtty->end_head(" address='" INTPTR_FORMAT "'", (intptr_t) this);
    }
    // print the header part first
    print();
    // then print the requested information
    if (PrintNativeNMethods) {
      print_code();
      if (oop_maps != NULL) {
        oop_maps->print();
      }
    }
    if (PrintRelocations) {
      print_relocations();
    }
    if (xtty != NULL) {
      xtty->tail("print_native_nmethod");
    }
  }
}

// For dtrace wrappers
#ifdef HAVE_DTRACE_H
nmethod::nmethod(
  Method* method,
  int nmethod_size,
  CodeOffsets* offsets,
  CodeBuffer* code_buffer,
  int frame_size)
  : CodeBlob("dtrace nmethod", code_buffer, sizeof(nmethod),
             nmethod_size, offsets->value(CodeOffsets::Frame_Complete), frame_size, NULL),
  _native_receiver_sp_offset(in_ByteSize(-1)),
  _native_basic_lock_sp_offset(in_ByteSize(-1))
{
  {
    debug_only(No_Safepoint_Verifier nsv;)
    assert_locked_or_safepoint(CodeCache_lock);

    init_defaults();
    _method                  = method;
    _entry_bci               = InvocationEntryBci;
    // We have no exception handler or deopt handler, so make the
    // values something that will never match a pc, like the nmethod vtable entry
    _exception_offset        = 0;
    _deoptimize_offset       = 0;
    _deoptimize_mh_offset    = 0;
    _unwind_handler_offset   = -1;
    _trap_offset             = offsets->value(CodeOffsets::Dtrace_trap);
    _orig_pc_offset          = 0;
    _consts_offset           = data_offset();
    _stub_offset             = data_offset();
    _oops_offset             = data_offset();
    _metadata_offset         = _oops_offset         + round_to(code_buffer->total_oop_size(), oopSize);
    _scopes_data_offset      = _metadata_offset     + round_to(code_buffer->total_metadata_size(), wordSize);
    _scopes_pcs_offset       = _scopes_data_offset;
    _dependencies_offset     = _scopes_pcs_offset;
    _handler_table_offset    = _dependencies_offset;
    _nul_chk_table_offset    = _handler_table_offset;
    _nmethod_end_offset      = _nul_chk_table_offset;
    _compile_id              = 0;  // default
    _comp_level              = CompLevel_none;
    _entry_point             = code_begin()          + offsets->value(CodeOffsets::Entry);
    _verified_entry_point    = code_begin()          + offsets->value(CodeOffsets::Verified_Entry);
    _osr_entry_point         = NULL;
    _exception_cache         = NULL;
    _pc_desc_cache.reset_to(NULL);
    _hotness_counter         = NMethodSweeper::hotness_counter_reset_val();

    code_buffer->copy_values_to(this);
    if (ScavengeRootsInCode) {
      if (detect_scavenge_root_oops()) {
        CodeCache::add_scavenge_root_nmethod(this);
      }
      Universe::heap()->register_nmethod(this);
    }
    DEBUG_ONLY(verify_scavenge_root_oops();)
    CodeCache::commit(this);
  }

  if (PrintNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) {
    ttyLocker ttyl;  // keep the following output all in one block
    // This output goes directly to the tty, not the compiler log.
    // To enable tools to match it up with the compilation activity,
    // be sure to tag this tty output with the compile ID.
    if (xtty != NULL) {
      xtty->begin_head("print_dtrace_nmethod");
      xtty->method(_method);
      xtty->stamp();
      xtty->end_head(" address='" INTPTR_FORMAT "'", (intptr_t) this);
    }
    // print the header part first
    print();
    // then print the requested information
    if (PrintNMethods) {
      print_code();
    }
    if (PrintRelocations) {
      print_relocations();
    }
    if (xtty != NULL) {
      xtty->tail("print_dtrace_nmethod");
    }
  }
}
#endif // def HAVE_DTRACE_H

void* nmethod::operator new(size_t size, int nmethod_size, int comp_level) throw () {
  // With a SegmentedCodeCache, nmethods are allocated on separate heaps and therefore do not share memory
  // with critical CodeBlobs. We define the allocation as critical to make sure all code heap memory is used.
  bool is_critical = SegmentedCodeCache;
  return CodeCache::allocate(nmethod_size, CodeCache::get_code_blob_type(comp_level), is_critical);
}

nmethod::nmethod(
  Method* method,
  int nmethod_size,
  int compile_id,
  int entry_bci,
  CodeOffsets* offsets,
  int orig_pc_offset,
  DebugInformationRecorder* debug_info,
  Dependencies* dependencies,
  CodeBuffer *code_buffer,
  int frame_size,
  OopMapSet* oop_maps,
  ExceptionHandlerTable* handler_table,
  ImplicitExceptionTable* nul_chk_table,
  AbstractCompiler* compiler,
  int comp_level
  )
  : CodeBlob("nmethod", code_buffer, sizeof(nmethod),
             nmethod_size, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps),
  _native_receiver_sp_offset(in_ByteSize(-1)),
  _native_basic_lock_sp_offset(in_ByteSize(-1))
{
  assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
  {
    debug_only(No_Safepoint_Verifier nsv;)
    assert_locked_or_safepoint(CodeCache_lock);

    init_defaults();
    _method                  = method;
    _entry_bci               = entry_bci;
    _compile_id              = compile_id;
    _comp_level              = comp_level;
    _compiler                = compiler;
    _orig_pc_offset          = orig_pc_offset;
    _hotness_counter         = NMethodSweeper::hotness_counter_reset_val();

    // Section offsets
    _consts_offset           = content_offset()      + code_buffer->total_offset_of(code_buffer->consts());
    _stub_offset             = content_offset()      + code_buffer->total_offset_of(code_buffer->stubs());

    // Exception handler and deopt handler are in the stub section
    assert(offsets->value(CodeOffsets::Exceptions) != -1, "must be set");
    assert(offsets->value(CodeOffsets::Deopt     ) != -1, "must be set");
    _exception_offset        = _stub_offset          + offsets->value(CodeOffsets::Exceptions);
    _deoptimize_offset       = _stub_offset          + offsets->value(CodeOffsets::Deopt);
    if (offsets->value(CodeOffsets::DeoptMH) != -1) {
      _deoptimize_mh_offset  = _stub_offset          + offsets->value(CodeOffsets::DeoptMH);
    } else {
      _deoptimize_mh_offset  = -1;
    }
    if (offsets->value(CodeOffsets::UnwindHandler) != -1) {
      _unwind_handler_offset = code_offset()         + offsets->value(CodeOffsets::UnwindHandler);
    } else {
      _unwind_handler_offset = -1;
    }

    _oops_offset             = data_offset();
    _metadata_offset         = _oops_offset          + round_to(code_buffer->total_oop_size(), oopSize);
    _scopes_data_offset      = _metadata_offset      + round_to(code_buffer->total_metadata_size(), wordSize);

    _scopes_pcs_offset       = _scopes_data_offset   + round_to(debug_info->data_size       (), oopSize);
    _dependencies_offset     = _scopes_pcs_offset    + adjust_pcs_size(debug_info->pcs_size());
    _handler_table_offset    = _dependencies_offset  + round_to(dependencies->size_in_bytes (), oopSize);
    _nul_chk_table_offset    = _handler_table_offset + round_to(handler_table->size_in_bytes(), oopSize);
    _nmethod_end_offset      = _nul_chk_table_offset + round_to(nul_chk_table->size_in_bytes(), oopSize);

    _entry_point             = code_begin()          + offsets->value(CodeOffsets::Entry);
    _verified_entry_point    = code_begin()          + offsets->value(CodeOffsets::Verified_Entry);
    _osr_entry_point         = code_begin()          + offsets->value(CodeOffsets::OSR_Entry);
    _exception_cache         = NULL;
    _pc_desc_cache.reset_to(scopes_pcs_begin());

    // Copy contents of ScopeDescRecorder to nmethod
    code_buffer->copy_values_to(this);
    debug_info->copy_to(this);
    dependencies->copy_to(this);
    if (ScavengeRootsInCode) {
      if (detect_scavenge_root_oops()) {
        CodeCache::add_scavenge_root_nmethod(this);
      }
      Universe::heap()->register_nmethod(this);
    }
    debug_only(verify_scavenge_root_oops());

    CodeCache::commit(this);

    // Copy contents of ExceptionHandlerTable to nmethod
    handler_table->copy_to(this);
    nul_chk_table->copy_to(this);

    // We use the entry-point information to determine whether a method is
    // static or non-static.
    assert(compiler->is_c2() ||
           _method->is_static() == (entry_point() == _verified_entry_point),
           " entry points must be same for static methods and vice versa");
  }

  bool printnmethods = PrintNMethods
    || CompilerOracle::should_print(_method)
    || CompilerOracle::has_option_string(_method, "PrintNMethods");
  if (printnmethods || PrintDebugInfo || PrintRelocations || PrintDependencies || PrintExceptionHandlers) {
    print_nmethod(printnmethods);
  }
}


// Print a short set of xml attributes to identify this nmethod.  The
// output should be embedded in some other element.
void nmethod::log_identity(xmlStream* log) const {
  log->print(" compile_id='%d'", compile_id());
  const char* nm_kind = compile_kind();
  if (nm_kind != NULL)  log->print(" compile_kind='%s'", nm_kind);
  if (compiler() != NULL) {
    log->print(" compiler='%s'", compiler()->name());
  }
  if (TieredCompilation) {
    log->print(" level='%d'", comp_level());
  }
}


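// Emit a <name>_offset attribute only when the corresponding section is non-empty.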
#define LOG_OFFSET(log, name)                             \
  if ((intptr_t)name##_end() - (intptr_t)name##_begin())  \
    log->print(" " XSTR(name) "_offset='%d'",             \
               (intptr_t)name##_begin() - (intptr_t)this)


void nmethod::log_new_nmethod() const {
  if (LogCompilation && xtty != NULL) {
    ttyLocker ttyl;
    HandleMark hm;
    xtty->begin_elem("nmethod");
    log_identity(xtty);
    xtty->print(" entry='" INTPTR_FORMAT "' size='%d'", code_begin(), size());
    xtty->print(" address='" INTPTR_FORMAT "'", (intptr_t) this);

    LOG_OFFSET(xtty, relocation);
    LOG_OFFSET(xtty, consts);
    LOG_OFFSET(xtty, insts);
    LOG_OFFSET(xtty, stub);
    LOG_OFFSET(xtty, scopes_data);
    LOG_OFFSET(xtty, scopes_pcs);
    LOG_OFFSET(xtty, dependencies);
    LOG_OFFSET(xtty, handler_table);
    LOG_OFFSET(xtty, nul_chk_table);
    LOG_OFFSET(xtty, oops);

    xtty->method(method());
    xtty->stamp();
    xtty->end_elem();
  }
}

#undef LOG_OFFSET


// Print out more verbose output usually for a newly created nmethod.
void nmethod::print_on(outputStream* st, const char* msg) const {
  if (st != NULL) {
    ttyLocker ttyl;
    if (WizardMode) {
      CompileTask::print_compilation(st, this, msg, /*short_form:*/ true);
      st->print_cr(" (" INTPTR_FORMAT ")", this);
    } else {
      CompileTask::print_compilation(st, this, msg, /*short_form:*/ false);
    }
  }
}


void nmethod::print_nmethod(bool printmethod) {
  ttyLocker ttyl;  // keep the following output all in one block
  if (xtty != NULL) {
    xtty->begin_head("print_nmethod");
    xtty->stamp();
    xtty->end_head();
  }
  // print the header part first
  print();
  // then print the requested information
  if (printmethod) {
    print_code();
    print_pcs();
    if (oop_maps()) {
      oop_maps()->print();
    }
  }
  if (PrintDebugInfo) {
    print_scopes();
  }
  if (PrintRelocations) {
    print_relocations();
  }
  if (PrintDependencies) {
    print_dependencies();
  }
  if (PrintExceptionHandlers) {
    print_handler_table();
    print_nul_chk_table();
  }
  if (xtty != NULL) {
    xtty->tail("print_nmethod");
  }
}


// Promote one word from an assembly-time handle to a live embedded oop.
inline void nmethod::initialize_immediate_oop(oop* dest, jobject handle) {
  if (handle == NULL ||
      // As a special case, IC oops are initialized to 1 or -1.
      handle == (jobject) Universe::non_oop_word()) {
    (*dest) = (oop) handle;
  } else {
    (*dest) = JNIHandles::resolve_non_null(handle);
  }
}


// Have to have the same name because it's called by a template
void nmethod::copy_values(GrowableArray<jobject>* array) {
  int length = array->length();
  assert((address)(oops_begin() + length) <= (address)oops_end(), "oops big enough");
  oop* dest = oops_begin();
  for (int index = 0 ; index < length; index++) {
    initialize_immediate_oop(&dest[index], array->at(index));
  }

  // Now we can fix up all the oops in the code.  We need to do this
  // in the code because the assembler uses jobjects as placeholders.
  // The code and relocations have already been initialized by the
  // CodeBlob constructor, so it is valid even at this early point to
  // iterate over relocations and patch the code.
  fix_oop_relocations(NULL, NULL, /*initialize_immediates=*/ true);
}

void nmethod::copy_values(GrowableArray<Metadata*>* array) {
  int length = array->length();
  assert((address)(metadata_begin() + length) <= (address)metadata_end(), "big enough");
  Metadata** dest = metadata_begin();
  for (int index = 0 ; index < length; index++) {
    dest[index] = array->at(index);
  }
}
bool nmethod::is_at_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    if (iter.type() == relocInfo::poll_return_type)
      return true;
  }
  return false;
}


bool nmethod::is_at_poll_or_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    relocInfo::relocType t = iter.type();
    if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
      return true;
  }
  return false;
}


void nmethod::fix_oop_relocations(address begin, address end, bool initialize_immediates) {
  // re-patch all oop-bearing instructions, just in case some oops moved
  RelocIterator iter(this, begin, end);
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* reloc = iter.oop_reloc();
      if (initialize_immediates && reloc->oop_is_immediate()) {
        oop* dest = reloc->oop_addr();
        initialize_immediate_oop(dest, (jobject) *dest);
      }
      // Refresh the oop-related bits of this instruction.
      reloc->fix_oop_relocation();
    } else if (iter.type() == relocInfo::metadata_type) {
      metadata_Relocation* reloc = iter.metadata_reloc();
      reloc->fix_metadata_relocation();
    }
  }
}


void nmethod::verify_oop_relocations() {
  // Ensure that the code matches the current oop values
  RelocIterator iter(this, NULL, NULL);
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* reloc = iter.oop_reloc();
      if (!reloc->oop_is_immediate()) {
        reloc->verify_oop_relocation();
      }
    }
  }
}


ScopeDesc* nmethod::scope_desc_at(address pc) {
  PcDesc* pd = pc_desc_at(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd->scope_decode_offset(),
                       pd->obj_decode_offset(), pd->should_reexecute(),
                       pd->return_oop());
}


void nmethod::clear_inline_caches() {
  assert(SafepointSynchronize::is_at_safepoint(), "cleaning of IC's only allowed at safepoint");
  if (is_zombie()) {
    return;
  }

  RelocIterator iter(this);
  while (iter.next()) {
    iter.reloc()->clear_inline_cache();
  }
}


void nmethod::cleanup_inline_caches() {

  assert_locked_or_safepoint(CompiledIC_lock);

  // If the method is not entrant or zombie then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (!is_in_use()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // This means that the low_boundary is going to be a little too high.
    // This shouldn't matter, since oops of non-entrant methods are never used.
    // In fact, why are we bothering to look at oops in a non-entrant method??
  }

  // Find all calls in an nmethod and clear the ones that point to zombie methods
  ResourceMark rm;
  RelocIterator iter(this, low_boundary);
  while(iter.next()) {
    switch(iter.type()) {
      case relocInfo::virtual_call_type:
      case relocInfo::opt_virtual_call_type: {
        CompiledIC *ic = CompiledIC_at(&iter);
        // OK to look up references to zombies here
        CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
        if( cb != NULL && cb->is_nmethod() ) {
          nmethod* nm = (nmethod*)cb;
          // Clean inline caches pointing to both zombie and not_entrant methods
          if (!nm->is_in_use() || (nm->method()->code() != nm)) ic->set_to_clean();
        }
        break;
      }
      case relocInfo::static_call_type: {
        CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
        CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());
        if( cb != NULL && cb->is_nmethod() ) {
          nmethod* nm = (nmethod*)cb;
          // Clean inline caches pointing to both zombie and not_entrant methods
          if (!nm->is_in_use() || (nm->method()->code() != nm)) csc->set_to_clean();
        }
        break;
      }
    }
  }
}

void nmethod::verify_clean_inline_caches() {
  assert_locked_or_safepoint(CompiledIC_lock);

  // If the method is not entrant or zombie then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (!is_in_use()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // This means that the low_boundary is going to be a little too high.
    // This shouldn't matter, since oops of non-entrant methods are never used.
    // In fact, why are we bothering to look at oops in a non-entrant method??
  }

  ResourceMark rm;
  RelocIterator iter(this, low_boundary);
  while(iter.next()) {
    switch(iter.type()) {
      case relocInfo::virtual_call_type:
      case relocInfo::opt_virtual_call_type: {
        CompiledIC *ic = CompiledIC_at(&iter);
        // OK to look up references to zombies here
        CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
        if( cb != NULL && cb->is_nmethod() ) {
          nmethod* nm = (nmethod*)cb;
          // Verify that inline caches pointing to both zombie and not_entrant methods are clean
          if (!nm->is_in_use() || (nm->method()->code() != nm)) {
            assert(ic->is_clean(), "IC should be clean");
          }
        }
        break;
      }
      case relocInfo::static_call_type: {
        CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
        CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());
        if( cb != NULL && cb->is_nmethod() ) {
          nmethod* nm = (nmethod*)cb;
          // Verify that inline caches pointing to both zombie and not_entrant methods are clean
          if (!nm->is_in_use() || (nm->method()->code() != nm)) {
            assert(csc->is_clean(), "IC should be clean");
          }
        }
        break;
      }
    }
  }
}

int nmethod::verify_icholder_relocations() {
  int count = 0;

  RelocIterator iter(this);
  while(iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc())) {
        CompiledIC *ic = CompiledIC_at(&iter);
        if (TraceCompiledIC) {
          tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
          ic->print();
        }
        assert(ic->cached_icholder() != NULL, "must be non-NULL");
        count++;
      }
    }
  }

  return count;
}

// This is a private interface with the sweeper.
void nmethod::mark_as_seen_on_stack() {
  assert(is_alive(), "Must be an alive method");
  // Set the traversal mark to ensure that the sweeper does 2
  // cleaning passes before moving to zombie.
  set_stack_traversal_mark(NMethodSweeper::traversal_count());
}

// Tell if a non-entrant method can be converted to a zombie (i.e.,
// there are no activations on the stack, not in use by the VM,
// and not in use by the ServiceThread)
bool nmethod::can_not_entrant_be_converted() {
  assert(is_not_entrant(), "must be a non-entrant method");

  // Since the nmethod sweeper only does partial sweeps, the sweeper's traversal
  // count can be greater than the stack traversal count before it hits the
  // nmethod for the second time.
  return stack_traversal_mark()+1 < NMethodSweeper::traversal_count() &&
         !is_locked_by_vm();
}

void nmethod::inc_decompile_count() {
  if (!is_compiled_by_c2()) return;
  // Could be gated by ProfileTraps, but do not bother...
  Method* m = method();
  if (m == NULL)  return;
  MethodData* mdo = m->method_data();
  if (mdo == NULL)  return;
  // There is a benign race here.  See comments in methodData.hpp.
  mdo->inc_decompile_count();
}

void nmethod::increase_unloading_clock() {
  _global_unloading_clock++;
  if (_global_unloading_clock == 0) {
    // _nmethods are allocated with _unloading_clock == 0,
    // so 0 is never used as a clock value.
    _global_unloading_clock = 1;
  }
}
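
// The per-nmethod unloading clock is published with release semantics and
// read with acquire semantics, so a reader that observes a new clock value
// also observes the unloading work that preceded the store.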

void nmethod::set_unloading_clock(unsigned char unloading_clock) {
  OrderAccess::release_store((volatile jubyte*)&_unloading_clock, unloading_clock);
}

unsigned char nmethod::unloading_clock() {
  return (unsigned char)OrderAccess::load_acquire((volatile jubyte*)&_unloading_clock);
}

void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {

  post_compiled_method_unload();

  // Since this nmethod is being unloaded, make sure that dependencies
  // recorded in instanceKlasses get flushed and pass non-NULL closure to
  // indicate that this work is being done during a GC.
  assert(Universe::heap()->is_gc_active(), "should only be called during gc");
  assert(is_alive != NULL, "Should be non-NULL");
  // A non-NULL is_alive closure indicates that this is being called during GC.
  flush_dependencies(is_alive);

  // Break cycle between nmethod & method
  if (TraceClassUnloading && WizardMode) {
    tty->print_cr("[Class unloading: Making nmethod " INTPTR_FORMAT
                  " unloadable], Method*(" INTPTR_FORMAT
                  "), cause(" INTPTR_FORMAT ")",
                  this, (address)_method, (address)cause);
    if (!Universe::heap()->is_gc_active())
      cause->klass()->print();
  }
  // Unlink the osr method, so we do not look this up again
  if (is_osr_method()) {
    invalidate_osr_method();
  }
  // If _method is already NULL the Method* is about to be unloaded,
  // so we don't have to break the cycle. Note that it is possible to
  // have the Method* live here, in case we unload the nmethod because
  // it is pointing to some oop (other than the Method*) being unloaded.
  if (_method != NULL) {
    // OSR methods point to the Method*, but the Method* does not
    // point back!
    if (_method->code() == this) {
      _method->clear_code(); // Break a cycle
    }
    _method = NULL;            // Clear the method of this dead nmethod
  }
  // Make the class unloaded - i.e., change state and notify sweeper
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  if (is_in_use()) {
    // Transitioning directly from live to unloaded -- so
    // we need to force a cache clean-up; remember this
    // for later on.
    CodeCache::set_needs_cache_clean(true);
  }

  // Unregister must be done before the state change
  Universe::heap()->unregister_nmethod(this);

  _state = unloaded;

  // Log the unloading.
  log_state_change();

  // The Method* is gone at this point
  assert(_method == NULL, "Tautology");

  set_osr_link(NULL);
  //set_scavenge_root_link(NULL); // done by prune_scavenge_root_nmethods
  NMethodSweeper::report_state_change(this);
}

void nmethod::invalidate_osr_method() {
  assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod");
  // Remove from list of active nmethods
  if (method() != NULL)
    method()->method_holder()->remove_osr_nmethod(this);
}

void nmethod::log_state_change() const {
  if (LogCompilation) {
    if (xtty != NULL) {
      ttyLocker ttyl;  // keep the following output all in one block
      if (_state == unloaded) {
        xtty->begin_elem("make_unloaded thread='" UINTX_FORMAT "'",
                         os::current_thread_id());
      } else {
        xtty->begin_elem("make_not_entrant thread='" UINTX_FORMAT "'%s",
                         os::current_thread_id(),
                         (_state == zombie ? " zombie='1'" : ""));
      }
      log_identity(xtty);
      xtty->stamp();
      xtty->end_elem();
    }
  }
  if (PrintCompilation && _state != unloaded) {
    print_on(tty, _state == zombie ? "made zombie" : "made not entrant");
  }
}

/**
 * Common functionality for both make_not_entrant and make_zombie
 */
bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
  assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");
  assert(!is_zombie(), "should not already be a zombie");

  // Make sure neither the nmethod nor the method is flushed in case of a safepoint in code below.
  nmethodLocker nml(this);
  methodHandle the_method(method());
  No_Safepoint_Verifier nsv;

  // During patching, depending on the nmethod state, we must notify the GC that
  // code has been unloaded, unregistering it. We cannot do this while
  // holding the Patching_lock because we need to use the CodeCache_lock, which
  // would be prone to deadlocks.
  // This flag remembers whether we need to lock and unregister later.
1410  bool nmethod_needs_unregister = false;
1411
1412  {
1413    // invalidate osr nmethod before acquiring the patching lock since
1414    // they both acquire leaf locks and we don't want a deadlock.
1415    // This logic is equivalent to the logic below for patching the
1416    // verified entry point of regular methods.
1417    if (is_osr_method()) {
1418      // this effectively makes the osr nmethod not entrant
1419      invalidate_osr_method();
1420    }
1421
1422    // Enter critical section.  Does not block for safepoint.
1423    MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
1424
1425    if (_state == state) {
1426      // another thread already performed this transition so nothing
1427      // to do, but return false to indicate this.
1428      return false;
1429    }
1430
1431    // The caller can be calling the method statically or through an inline
1432    // cache call.
1433    if (!is_osr_method() && !is_not_entrant()) {
1434      NativeJump::patch_verified_entry(entry_point(), verified_entry_point(),
1435                  SharedRuntime::get_handle_wrong_method_stub());
1436    }
1437
1438    if (is_in_use()) {
1439      // It's a true state change, so mark the method as decompiled.
1440      // Do it only for transition from alive.
1441      inc_decompile_count();
1442    }
1443
1444    // If the state is becoming a zombie, signal to unregister the nmethod with
1445    // the heap.
1446    // This nmethod may have already been unloaded during a full GC.
1447    if ((state == zombie) && !is_unloaded()) {
1448      nmethod_needs_unregister = true;
1449    }
1450
1451    // Must happen before state change. Otherwise we have a race condition in
1452    // nmethod::can_not_entrant_be_converted(). I.e., a method can immediately
1453    // transition its state from 'not_entrant' to 'zombie' without having to wait
1454    // for stack scanning.
1455    if (state == not_entrant) {
1456      mark_as_seen_on_stack();
1457      OrderAccess::storestore();
1458    }
1459
1460    // Change state
1461    _state = state;
1462
1463    // Log the transition once
1464    log_state_change();
1465
1466    // Remove nmethod from method.
1467    // We need to check if both the _code and _from_compiled_code_entry_point
1468    // refer to this nmethod because there is a race in setting these two fields
1469    // in Method* as seen in bugid 4947125.
1470    // If the vep() points to the zombie nmethod, the memory for the nmethod
1471    // could be flushed and the compiler and vtable stubs could still call
1472    // through it.
1473    if (method() != NULL && (method()->code() == this ||
1474                             method()->from_compiled_entry() == verified_entry_point())) {
1475      HandleMark hm;
1476      method()->clear_code();
1477    }
1478  } // leave critical region under Patching_lock
1479
1480  // When the nmethod becomes a zombie, it is no longer alive, so its
1481  // dependencies must be flushed.  nmethods in the not_entrant
1482  // state will be flushed later when the transition to zombie
1483  // happens or they get unloaded.
1484  if (state == zombie) {
1485    {
1486      // Flushing dependencies must be done before any possible
1487      // safepoint can sneak in, otherwise the oops used by the
1488      // dependency logic could have become stale.
1489      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1490      if (nmethod_needs_unregister) {
1491        Universe::heap()->unregister_nmethod(this);
1492      }
1493      flush_dependencies(NULL);
1494    }
1495
1496    // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload
1497    // event and it hasn't already been reported for this nmethod then
1498    // report it now. (The event may have been reported earlier if the GC
1499    // marked it for unloading.) JvmtiDeferredEventQueue support means
1500    // we no longer go to a safepoint here.
1501    post_compiled_method_unload();
1502
1503#ifdef ASSERT
1504    // It's no longer safe to access the oops section since zombie
1505    // nmethods aren't scanned for GC.
1506    _oops_are_stale = true;
1507#endif
1508    // The Method may be reclaimed by class unloading now that the
1509    // nmethod is in zombie state.
1510    set_method(NULL);
1511  } else {
1512    assert(state == not_entrant, "other cases may need to be handled differently");
1513  }
1514
1515  if (TraceCreateZombies) {
1516    tty->print_cr("nmethod <" INTPTR_FORMAT "> code made %s", this, (state == not_entrant) ? "not entrant" : "zombie");
1517  }
1518
1519  NMethodSweeper::report_state_change(this);
1520  return true;
1521}
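
// For reference, the public entry points are thin wrappers around the
// transition above; a sketch of their shape (the authoritative
// declarations live in nmethod.hpp):
//
//   bool make_not_entrant() { return make_not_entrant_or_zombie(not_entrant); }
//   bool make_zombie()      { return make_not_entrant_or_zombie(zombie); }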
1522
1523void nmethod::flush() {
1524  // Note that there are no valid oops in the nmethod anymore.
1525  assert(is_zombie() || (is_osr_method() && is_unloaded()), "must be a zombie method");
1526  assert(is_marked_for_reclamation() || (is_osr_method() && is_unloaded()), "must be marked for reclamation");
1527
1528  assert (!is_locked_by_vm(), "locked methods shouldn't be flushed");
1529  assert_locked_or_safepoint(CodeCache_lock);
1530
1531  // completely deallocate this method
1532  Events::log(JavaThread::current(), "flushing nmethod " INTPTR_FORMAT, this);
1533  if (PrintMethodFlushing) {
1534    tty->print_cr("*flushing nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT "/Free CodeCache:" SIZE_FORMAT "Kb",
1535        _compile_id, this, CodeCache::nof_blobs(), CodeCache::unallocated_capacity(CodeCache::get_code_blob_type(_comp_level))/1024);
1536  }
1537
1538  // We need to deallocate any ExceptionCache data.
1539  // Note that we do not need to grab the nmethod lock for this; it
1540  // had better be thread safe if we're disposing of it!
1541  ExceptionCache* ec = exception_cache();
1542  set_exception_cache(NULL);
1543  while(ec != NULL) {
1544    ExceptionCache* next = ec->next();
1545    delete ec;
1546    ec = next;
1547  }
1548
1549  if (on_scavenge_root_list()) {
1550    CodeCache::drop_scavenge_root_nmethod(this);
1551  }
1552
1553#ifdef SHARK
1554  ((SharkCompiler *) compiler())->free_compiled_method(insts_begin());
1555#endif // SHARK
1556
1557  ((CodeBlob*)(this))->flush();
1558
1559  CodeCache::free(this);
1560}
1561
1562//
1563// Notify all classes this nmethod is dependent on that it is no
1564// longer dependent. This should only be called in two situations.
1565// First, when an nmethod transitions to a zombie, all dependents need
1566// to be cleared.  Since zombification happens at a safepoint there are
1567// no synchronization issues.  The second place is a little more tricky.
1568// During phase 1 of mark sweep, class unloading may happen and as a
1569// result some nmethods may get unloaded.  In this case the flushing
1570// of dependencies must happen during phase 1 since after GC any
1571// dependencies in the unloaded nmethod won't be updated, so
1572// traversing the dependency information is unsafe.  In that case this
1573// function is called with a non-NULL argument and it only
1574// notifies InstanceKlasses that are reachable.
1575
1576void nmethod::flush_dependencies(BoolObjectClosure* is_alive) {
1577  assert_locked_or_safepoint(CodeCache_lock);
1578  assert(Universe::heap()->is_gc_active() == (is_alive != NULL),
1579  "is_alive is non-NULL if and only if we are called during GC");
1580  if (!has_flushed_dependencies()) {
1581    set_has_flushed_dependencies();
1582    for (Dependencies::DepStream deps(this); deps.next(); ) {
1583      Klass* klass = deps.context_type();
1584      if (klass == NULL)  continue;  // ignore things like evol_method
1585
1586      // During GC the is_alive closure is non-NULL, and is used to
1587      // determine liveness of dependees that need to be updated.
1588      if (is_alive == NULL || klass->is_loader_alive(is_alive)) {
1589        InstanceKlass::cast(klass)->remove_dependent_nmethod(this);
1590      }
1591    }
1592  }
1593}
1594
1595
1596// If this oop is not live, the nmethod can be unloaded.
1597bool nmethod::can_unload(BoolObjectClosure* is_alive, oop* root, bool unloading_occurred) {
1598  assert(root != NULL, "just checking");
1599  oop obj = *root;
1600  if (obj == NULL || is_alive->do_object_b(obj)) {
1601      return false;
1602  }
1603
1604  // If ScavengeRootsInCode is true, an nmethod might be unloaded
1605  // simply because one of its constant oops has gone dead.
1606  // No actual classes need to be unloaded in order for this to occur.
1607  assert(unloading_occurred || ScavengeRootsInCode, "Inconsistency in unloading");
1608  make_unloaded(is_alive, obj);
1609  return true;
1610}
1611
1612// ------------------------------------------------------------------
1613// post_compiled_method_load_event
1614// new method for install_code() path
1615// Transfer information from compilation to jvmti
1616void nmethod::post_compiled_method_load_event() {
1617
1618  Method* moop = method();
1619  HOTSPOT_COMPILED_METHOD_LOAD(
1620      (char *) moop->klass_name()->bytes(),
1621      moop->klass_name()->utf8_length(),
1622      (char *) moop->name()->bytes(),
1623      moop->name()->utf8_length(),
1624      (char *) moop->signature()->bytes(),
1625      moop->signature()->utf8_length(),
1626      insts_begin(), insts_size());
1627
1628  if (JvmtiExport::should_post_compiled_method_load() ||
1629      JvmtiExport::should_post_compiled_method_unload()) {
1630    get_and_cache_jmethod_id();
1631  }
1632
1633  if (JvmtiExport::should_post_compiled_method_load()) {
1634    // Let the Service thread (which is a real Java thread) post the event
1635    MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
1636    JvmtiDeferredEventQueue::enqueue(
1637      JvmtiDeferredEvent::compiled_method_load_event(this));
1638  }
1639}
1640
1641jmethodID nmethod::get_and_cache_jmethod_id() {
1642  if (_jmethod_id == NULL) {
1643    // Cache the jmethod_id since it can no longer be looked up once the
1644    // method itself has been marked for unloading.
1645    _jmethod_id = method()->jmethod_id();
1646  }
1647  return _jmethod_id;
1648}
1649
1650void nmethod::post_compiled_method_unload() {
1651  if (unload_reported()) {
1652    // During unloading we transition to unloaded and then to zombie
1653    // and the unloading is reported during the first transition.
1654    return;
1655  }
1656
1657  assert(_method != NULL && !is_unloaded(), "just checking");
1658  DTRACE_METHOD_UNLOAD_PROBE(method());
1659
1660  // If a JVMTI agent has enabled the CompiledMethodUnload event then
1661  // post the event. Sometime later this nmethod will be made a zombie
1662  // by the sweeper but the Method* will not be valid at that point.
1663  // If the _jmethod_id is null then no load event was ever requested
1664  // so don't bother posting the unload.  The main reason for this is
1665  // that the jmethodID is a weak reference to the Method* so if
1666  // it's being unloaded there's no way to look it up since the weak
1667  // ref will have been cleared.
1668  if (_jmethod_id != NULL && JvmtiExport::should_post_compiled_method_unload()) {
1669    assert(!unload_reported(), "already unloaded");
1670    JvmtiDeferredEvent event =
1671      JvmtiDeferredEvent::compiled_method_unload_event(this,
1672          _jmethod_id, insts_begin());
1673    if (SafepointSynchronize::is_at_safepoint()) {
1674      // Don't want to take the queueing lock. Add it as pending and
1675      // it will get enqueued later.
1676      JvmtiDeferredEventQueue::add_pending_event(event);
1677    } else {
1678      MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
1679      JvmtiDeferredEventQueue::enqueue(event);
1680    }
1681  }
1682
1683  // The JVMTI CompiledMethodUnload event can be enabled or disabled at
1684  // any time. As the nmethod is being unloaded now, we mark it as
1685  // having the unload event reported - this will ensure that we don't
1686  // attempt to report the event in the unlikely scenario where the
1687  // event is enabled at the time the nmethod is made a zombie.
1688  set_unload_reported();
1689}
1690
1691static void clean_ic_if_metadata_is_dead(CompiledIC *ic, BoolObjectClosure *is_alive) {
1692  if (ic->is_icholder_call()) {
1693    // The only exception is compiledICHolder oops which may
1694    // yet be marked below. (We check this further below.)
1695    CompiledICHolder* cichk_oop = ic->cached_icholder();
1696    if (cichk_oop->holder_method()->method_holder()->is_loader_alive(is_alive) &&
1697        cichk_oop->holder_klass()->is_loader_alive(is_alive)) {
1698      return;
1699    }
1700  } else {
1701    Metadata* ic_oop = ic->cached_metadata();
1702    if (ic_oop != NULL) {
1703      if (ic_oop->is_klass()) {
1704        if (((Klass*)ic_oop)->is_loader_alive(is_alive)) {
1705          return;
1706        }
1707      } else if (ic_oop->is_method()) {
1708        if (((Method*)ic_oop)->method_holder()->is_loader_alive(is_alive)) {
1709          return;
1710        }
1711      } else {
1712        ShouldNotReachHere();
1713      }
1714    }
1715  }
1716
1717  ic->set_to_clean();
1718}
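
// In short: the inline cache above survives this pass only if every piece of
// metadata it caches (the icholder's method and klass, or a single cached
// Klass*/Method*) still has a live class loader; otherwise it is reset to
// the clean state and will be re-resolved on the next dispatch.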
1719
1720// This is called at the end of the strong tracing/marking phase of a
1721// GC to unload an nmethod if it contains otherwise unreachable
1722// oops.
1723
1724void nmethod::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
1725  // Make sure the oop's ready to receive visitors
1726  assert(!is_zombie() && !is_unloaded(),
1727         "should not call follow on zombie or unloaded nmethod");
1728
1729  // If the method is not entrant then a JMP is plastered over the
1730  // first few bytes.  If an oop in the old code was there, that oop
1731  // should not get GC'd.  Skip the first few bytes of oops on
1732  // not-entrant methods.
1733  address low_boundary = verified_entry_point();
1734  if (is_not_entrant()) {
1735    low_boundary += NativeJump::instruction_size;
1736    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
1737    // (See comment above.)
1738  }
1739
1740  // The RedefineClasses() API can cause the class unloading invariant
1741  // to no longer be true. See jvmtiExport.hpp for details.
1742  // Also, leave a debugging breadcrumb in local flag.
1743  bool a_class_was_redefined = JvmtiExport::has_redefined_a_class();
1744  if (a_class_was_redefined) {
1745    // The unloading_occurred flag is set before the call to
1746    // post_compiled_method_unload() so that the unloading
1747    // of this nmethod is reported.
1748    unloading_occurred = true;
1749  }
1750
1751  // Exception cache
1752  clean_exception_cache(is_alive);
1753
1754  // If class unloading occurred we first iterate over all inline caches and
1755  // clear ICs where the cached oop is referring to an unloaded klass or method.
1756  // The remaining live cached oops will be traversed in the relocInfo::oop_type
1757  // iteration below.
1758  if (unloading_occurred) {
1759    RelocIterator iter(this, low_boundary);
1760    while(iter.next()) {
1761      if (iter.type() == relocInfo::virtual_call_type) {
1762        CompiledIC *ic = CompiledIC_at(&iter);
1763        clean_ic_if_metadata_is_dead(ic, is_alive);
1764      }
1765    }
1766  }
1767
1768  // Compiled code
1769  {
1770    RelocIterator iter(this, low_boundary);
1771    while (iter.next()) {
1772      if (iter.type() == relocInfo::oop_type) {
1773        oop_Relocation* r = iter.oop_reloc();
1774        // In this loop, we must only traverse those oops directly embedded in
1775        // the code.  Other oops (oop_index>0) are seen as part of scopes_oops.
1776        assert(1 == (r->oop_is_immediate()) +
1777                    (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
1778               "oop must be found in exactly one place");
1779        if (r->oop_is_immediate() && r->oop_value() != NULL) {
1780          if (can_unload(is_alive, r->oop_addr(), unloading_occurred)) {
1781            return;
1782          }
1783        }
1784      }
1785    }
1786  }
1787
1788
1789  // Scopes
1790  for (oop* p = oops_begin(); p < oops_end(); p++) {
1791    if (*p == Universe::non_oop_word())  continue;  // skip non-oops
1792    if (can_unload(is_alive, p, unloading_occurred)) {
1793      return;
1794    }
1795  }
1796
1797  // Ensure that all metadata is still alive
1798  verify_metadata_loaders(low_boundary, is_alive);
1799}
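
// A sketch of the calling side (the exact call sites live in the collectors
// and in the code cache code): during the unloading phase the GC walks the
// alive nmethods and lets each one unload itself if it holds dead oops:
//
//   // for each alive nmethod nm discovered by the code cache walk:
//   nm->do_unloading(is_alive, unloading_occurred);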
1800
1801template <class CompiledICorStaticCall>
1802static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, BoolObjectClosure *is_alive, nmethod* from) {
1803  // OK to look up references to zombies here
1804  CodeBlob *cb = CodeCache::find_blob_unsafe(addr);
1805  if (cb != NULL && cb->is_nmethod()) {
1806    nmethod* nm = (nmethod*)cb;
1807
1808    if (nm->unloading_clock() != nmethod::global_unloading_clock()) {
1809      // The nmethod has not been processed yet.
1810      return true;
1811    }
1812
1813    // Clean inline caches pointing to both zombie and not_entrant methods
1814    if (!nm->is_in_use() || (nm->method()->code() != nm)) {
1815      ic->set_to_clean();
1816      assert(ic->is_clean(), err_msg("nmethod " PTR_FORMAT " not clean %s", from, from->method()->name_and_sig_as_C_string()));
1817    }
1818  }
1819
1820  return false;
1821}
1822
1823static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, BoolObjectClosure *is_alive, nmethod* from) {
1824  return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), is_alive, from);
1825}
1826
1827static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, BoolObjectClosure *is_alive, nmethod* from) {
1828  return clean_if_nmethod_is_unloaded(csc, csc->destination(), is_alive, from);
1829}
1830
1831bool nmethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred) {
1832  ResourceMark rm;
1833
1834  // Make sure the oop's ready to receive visitors
1835  assert(!is_zombie() && !is_unloaded(),
1836         "should not call follow on zombie or unloaded nmethod");
1837
1838  // If the method is not entrant then a JMP is plastered over the
1839  // first few bytes.  If an oop in the old code was there, that oop
1840  // should not get GC'd.  Skip the first few bytes of oops on
1841  // not-entrant methods.
1842  address low_boundary = verified_entry_point();
1843  if (is_not_entrant()) {
1844    low_boundary += NativeJump::instruction_size;
1845    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
1846    // (See comment above.)
1847  }
1848
1849  // The RedefineClasses() API can cause the class unloading invariant
1850  // to no longer be true. See jvmtiExport.hpp for details.
1851  // Also, leave a debugging breadcrumb in local flag.
1852  bool a_class_was_redefined = JvmtiExport::has_redefined_a_class();
1853  if (a_class_was_redefined) {
1854    // The unloading_occurred flag is set before the call to
1855    // post_compiled_method_unload() so that the unloading
1856    // of this nmethod is reported.
1857    unloading_occurred = true;
1858  }
1859
1860  // Exception cache
1861  clean_exception_cache(is_alive);
1862
1863  bool is_unloaded = false;
1864  bool postponed = false;
1865
1866  RelocIterator iter(this, low_boundary);
1867  while(iter.next()) {
1868
1869    switch (iter.type()) {
1870
1871    case relocInfo::virtual_call_type:
1872      if (unloading_occurred) {
1873        // If class unloading occurred, clear ICs where the cached metadata
1874        // refers to an unloaded klass or method.
1875        clean_ic_if_metadata_is_dead(CompiledIC_at(&iter), is_alive);
1876      }
1877
1878      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
1879      break;
1880
1881    case relocInfo::opt_virtual_call_type:
1882      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
1883      break;
1884
1885    case relocInfo::static_call_type:
1886      postponed |= clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);
1887      break;
1888
1889    case relocInfo::oop_type:
1890      if (!is_unloaded) {
1891        // Unload check
1892        oop_Relocation* r = iter.oop_reloc();
1893        // Traverse those oops directly embedded in the code.
1894        // Other oops (oop_index>0) are seen as part of scopes_oops.
1895        assert(1 == (r->oop_is_immediate()) +
1896                  (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
1897              "oop must be found in exactly one place");
1898        if (r->oop_is_immediate() && r->oop_value() != NULL) {
1899          if (can_unload(is_alive, r->oop_addr(), unloading_occurred)) {
1900            is_unloaded = true;
1901          }
1902        }
1903      }
1904      break;
1905
1906    }
1907  }
1908
1909  if (is_unloaded) {
1910    return postponed;
1911  }
1912
1913  // Scopes
1914  for (oop* p = oops_begin(); p < oops_end(); p++) {
1915    if (*p == Universe::non_oop_word())  continue;  // skip non-oops
1916    if (can_unload(is_alive, p, unloading_occurred)) {
1917      is_unloaded = true;
1918      break;
1919    }
1920  }
1921
1922  if (is_unloaded) {
1923    return postponed;
1924  }
1925
1926  // Ensure that all metadata is still alive
1927  verify_metadata_loaders(low_boundary, is_alive);
1928
1929  return postponed;
1930}
1931
1932void nmethod::do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred) {
1933  ResourceMark rm;
1934
1935  // Make sure the oop's ready to receive visitors
1936  assert(!is_zombie(),
1937         "should not call follow on zombie nmethod");
1938
1939  // If the method is not entrant then a JMP is plastered over the
1940  // first few bytes.  If an oop in the old code was there, that oop
1941  // should not get GC'd.  Skip the first few bytes of oops on
1942  // not-entrant methods.
1943  address low_boundary = verified_entry_point();
1944  if (is_not_entrant()) {
1945    low_boundary += NativeJump::instruction_size;
1946    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
1947    // (See comment above.)
1948  }
1949
1950  RelocIterator iter(this, low_boundary);
1951  while(iter.next()) {
1952
1953    switch (iter.type()) {
1954
1955    case relocInfo::virtual_call_type:
1956      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
1957      break;
1958
1959    case relocInfo::opt_virtual_call_type:
1960      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
1961      break;
1962
1963    case relocInfo::static_call_type:
1964      clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);
1965      break;
1966    }
1967  }
1968}
1969
1970#ifdef ASSERT
1971
1972class CheckClass : AllStatic {
1973  static BoolObjectClosure* _is_alive;
1974
1975  // Check class_loader is alive for this bit of metadata.
1976  static void check_class(Metadata* md) {
1977    Klass* klass = NULL;
1978    if (md->is_klass()) {
1979      klass = ((Klass*)md);
1980    } else if (md->is_method()) {
1981      klass = ((Method*)md)->method_holder();
1982    } else if (md->is_methodData()) {
1983      klass = ((MethodData*)md)->method()->method_holder();
1984    } else {
1985      md->print();
1986      ShouldNotReachHere();
1987    }
1988    assert(klass->is_loader_alive(_is_alive), "must be alive");
1989  }
1990 public:
1991  static void do_check_class(BoolObjectClosure* is_alive, nmethod* nm) {
1992    assert(SafepointSynchronize::is_at_safepoint(), "this is only ok at safepoint");
1993    _is_alive = is_alive;
1994    nm->metadata_do(check_class);
1995  }
1996};
1997
1998// This is called during a safepoint so can use static data
1999BoolObjectClosure* CheckClass::_is_alive = NULL;
2000#endif // ASSERT
2001
2002
2003// Processing of oop references should have been sufficient to keep
2004// all strong references alive.  Any weak references should have been
2005// cleared as well.  Visit all the metadata and ensure that it's
2006// really alive.
2007void nmethod::verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive) {
2008#ifdef ASSERT
2009  RelocIterator iter(this, low_boundary);
2010  while (iter.next()) {
2011    // static_stub_Relocations may have dangling references to
2012    // Method*s so trim them out here.  Otherwise it looks like
2013    // compiled code is maintaining a link to dead metadata.
2014    address static_call_addr = NULL;
2015    if (iter.type() == relocInfo::opt_virtual_call_type) {
2016      CompiledIC* cic = CompiledIC_at(&iter);
2017      if (!cic->is_call_to_interpreted()) {
2018        static_call_addr = iter.addr();
2019      }
2020    } else if (iter.type() == relocInfo::static_call_type) {
2021      CompiledStaticCall* csc = compiledStaticCall_at(iter.reloc());
2022      if (!csc->is_call_to_interpreted()) {
2023        static_call_addr = iter.addr();
2024      }
2025    }
2026    if (static_call_addr != NULL) {
2027      RelocIterator sciter(this, low_boundary);
2028      while (sciter.next()) {
2029        if (sciter.type() == relocInfo::static_stub_type &&
2030            sciter.static_stub_reloc()->static_call() == static_call_addr) {
2031          sciter.static_stub_reloc()->clear_inline_cache();
2032        }
2033      }
2034    }
2035  }
2036  // Check that the metadata embedded in the nmethod is alive
2037  CheckClass::do_check_class(is_alive, this);
2038#endif
2039}
2040
2041
2042// Iterate over metadata calling this function.   Used by RedefineClasses
2043void nmethod::metadata_do(void f(Metadata*)) {
2044  address low_boundary = verified_entry_point();
2045  if (is_not_entrant()) {
2046    low_boundary += NativeJump::instruction_size;
2047    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
2048    // (See comment above.)
2049  }
2050  {
2051    // Visit all immediate references that are embedded in the instruction stream.
2052    RelocIterator iter(this, low_boundary);
2053    while (iter.next()) {
2054      if (iter.type() == relocInfo::metadata_type ) {
2055        metadata_Relocation* r = iter.metadata_reloc();
2056        // In this loop, we must only follow those metadata directly embedded in
2057        // the code.  Other metadata (oop_index>0) are seen as part of
2058        // the metadata section below.
2059        assert(1 == (r->metadata_is_immediate()) +
2060               (r->metadata_addr() >= metadata_begin() && r->metadata_addr() < metadata_end()),
2061               "metadata must be found in exactly one place");
2062        if (r->metadata_is_immediate() && r->metadata_value() != NULL) {
2063          Metadata* md = r->metadata_value();
2064          if (md != _method) f(md);
2065        }
2066      } else if (iter.type() == relocInfo::virtual_call_type) {
2067        // Check compiledIC holders associated with this nmethod
2068        CompiledIC *ic = CompiledIC_at(&iter);
2069        if (ic->is_icholder_call()) {
2070          CompiledICHolder* cichk = ic->cached_icholder();
2071          f(cichk->holder_method());
2072          f(cichk->holder_klass());
2073        } else {
2074          Metadata* ic_oop = ic->cached_metadata();
2075          if (ic_oop != NULL) {
2076            f(ic_oop);
2077          }
2078        }
2079      }
2080    }
2081  }
2082
2083  // Visit the metadata section
2084  for (Metadata** p = metadata_begin(); p < metadata_end(); p++) {
2085    if (*p == Universe::non_oop_word() || *p == NULL)  continue;  // skip non-oops
2086    Metadata* md = *p;
2087    f(md);
2088  }
2089
2090  // Finally, call f on the Method* itself; it is not embedded in the places above.
2091  if (_method != NULL) f(_method);
2092}
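
// Illustrative use (hypothetical helper, names not from this file):
// metadata_do() visits every Metadata* reachable from this nmethod, so a
// simple counting callback can be passed in:
//
//   static int _md_count = 0;
//   static void count_metadata(Metadata* md) { _md_count++; }
//   // ...
//   nm->metadata_do(count_metadata);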
2093
2094void nmethod::oops_do(OopClosure* f, bool allow_zombie) {
2095  // Make sure the oops are ready to receive visitors
2096  assert(allow_zombie || !is_zombie(), "should not call follow on zombie nmethod");
2097  assert(!is_unloaded(), "should not call follow on unloaded nmethod");
2098
2099  // If the method is not entrant or zombie then a JMP is plastered over the
2100  // first few bytes.  If an oop in the old code was there, that oop
2101  // should not get GC'd.  Skip the first few bytes of oops on
2102  // not-entrant methods.
2103  address low_boundary = verified_entry_point();
2104  if (is_not_entrant()) {
2105    low_boundary += NativeJump::instruction_size;
2106    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
2107    // (See comment above.)
2108  }
2109
2110  RelocIterator iter(this, low_boundary);
2111
2112  while (iter.next()) {
2113    if (iter.type() == relocInfo::oop_type ) {
2114      oop_Relocation* r = iter.oop_reloc();
2115      // In this loop, we must only follow those oops directly embedded in
2116      // the code.  Other oops (oop_index>0) are seen as part of scopes_oops.
2117      assert(1 == (r->oop_is_immediate()) +
2118                   (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
2119             "oop must be found in exactly one place");
2120      if (r->oop_is_immediate() && r->oop_value() != NULL) {
2121        f->do_oop(r->oop_addr());
2122      }
2123    }
2124  }
2125
2126  // Scopes
2127  // This includes oop constants not inlined in the code stream.
2128  for (oop* p = oops_begin(); p < oops_end(); p++) {
2129    if (*p == Universe::non_oop_word())  continue;  // skip non-oops
2130    f->do_oop(p);
2131  }
2132}
2133
2134#define NMETHOD_SENTINEL ((nmethod*)badAddress)
2135
2136nmethod* volatile nmethod::_oops_do_mark_nmethods;
2137
2138// An nmethod is "marked" if its _mark_link is set non-null.
2139// Even if it is the end of the linked list, it will have a non-null link value,
2140// as long as it is on the list.
2141// This code must be MP safe, because it is used from parallel GC passes.
2142bool nmethod::test_set_oops_do_mark() {
2143  assert(nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");
2144  nmethod* observed_mark_link = _oops_do_mark_link;
2145  if (observed_mark_link == NULL) {
2146    // Claim this nmethod for this thread to mark.
2147    observed_mark_link = (nmethod*)
2148      Atomic::cmpxchg_ptr(NMETHOD_SENTINEL, &_oops_do_mark_link, NULL);
2149    if (observed_mark_link == NULL) {
2150
2151      // Atomically append this nmethod (now claimed) to the head of the list:
2152      nmethod* observed_mark_nmethods = _oops_do_mark_nmethods;
2153      for (;;) {
2154        nmethod* required_mark_nmethods = observed_mark_nmethods;
2155        _oops_do_mark_link = required_mark_nmethods;
2156        observed_mark_nmethods = (nmethod*)
2157          Atomic::cmpxchg_ptr(this, &_oops_do_mark_nmethods, required_mark_nmethods);
2158        if (observed_mark_nmethods == required_mark_nmethods)
2159          break;
2160      }
2161      // Mark was clear when we first saw this guy.
2162      NOT_PRODUCT(if (TraceScavenge)  print_on(tty, "oops_do, mark"));
2163      return false;
2164    }
2165  }
2166  // On fall through, another racing thread marked this nmethod before we did.
2167  return true;
2168}
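
// Schematically, the claim above is a two-step lock-free protocol (pseudocode
// sketch, not the exact primitives): first CAS the per-nmethod link from NULL
// to a sentinel to claim it, then push the claimed nmethod onto the global
// list with a CAS retry loop (a Treiber-stack push):
//
//   if (cas(&_oops_do_mark_link, NULL, SENTINEL)) {
//     do { head = _oops_do_mark_nmethods; _oops_do_mark_link = head; }
//     while (!cas(&_oops_do_mark_nmethods, head, this));
//   }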
2169
2170void nmethod::oops_do_marking_prologue() {
2171  NOT_PRODUCT(if (TraceScavenge)  tty->print_cr("[oops_do_marking_prologue"));
2172  assert(_oops_do_mark_nmethods == NULL, "must not call oops_do_marking_prologue twice in a row");
2173  // We use cmpxchg_ptr instead of regular assignment here because the user
2174  // may fork a bunch of threads, and we need them all to see the same state.
2175  void* observed = Atomic::cmpxchg_ptr(NMETHOD_SENTINEL, &_oops_do_mark_nmethods, NULL);
2176  guarantee(observed == NULL, "no races in this sequential code");
2177}
2178
2179void nmethod::oops_do_marking_epilogue() {
2180  assert(_oops_do_mark_nmethods != NULL, "must not call oops_do_marking_epilogue twice in a row");
2181  nmethod* cur = _oops_do_mark_nmethods;
2182  while (cur != NMETHOD_SENTINEL) {
2183    assert(cur != NULL, "not NULL-terminated");
2184    nmethod* next = cur->_oops_do_mark_link;
2185    cur->_oops_do_mark_link = NULL;
2186    cur->verify_oop_relocations();
2187    NOT_PRODUCT(if (TraceScavenge)  cur->print_on(tty, "oops_do, unmark"));
2188    cur = next;
2189  }
2190  void* required = _oops_do_mark_nmethods;
2191  void* observed = Atomic::cmpxchg_ptr(NULL, &_oops_do_mark_nmethods, required);
2192  guarantee(observed == required, "no races in this sequential code");
2193  NOT_PRODUCT(if (TraceScavenge)  tty->print_cr("oops_do_marking_epilogue]"));
2194}
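
// A typical marking cycle, as driven by a GC pass (a sketch; the exact call
// sites live in the collectors):
//
//   nmethod::oops_do_marking_prologue();
//   // ... each worker claims nmethods via test_set_oops_do_mark() and
//   // applies its OopClosure through oops_do() ...
//   nmethod::oops_do_marking_epilogue();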
2195
2196class DetectScavengeRoot: public OopClosure {
2197  bool     _detected_scavenge_root;
2198public:
2199  DetectScavengeRoot() : _detected_scavenge_root(false)
2200  { NOT_PRODUCT(_print_nm = NULL); }
2201  bool detected_scavenge_root() { return _detected_scavenge_root; }
2202  virtual void do_oop(oop* p) {
2203    if ((*p) != NULL && (*p)->is_scavengable()) {
2204      NOT_PRODUCT(maybe_print(p));
2205      _detected_scavenge_root = true;
2206    }
2207  }
2208  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
2209
2210#ifndef PRODUCT
2211  nmethod* _print_nm;
2212  void maybe_print(oop* p) {
2213    if (_print_nm == NULL)  return;
2214    if (!_detected_scavenge_root)  _print_nm->print_on(tty, "new scavenge root");
2215    tty->print_cr(""PTR_FORMAT"[offset=%d] detected scavengable oop "PTR_FORMAT" (found at "PTR_FORMAT")",
2216                  _print_nm, (int)((intptr_t)p - (intptr_t)_print_nm),
2217                  (void *)(*p), (intptr_t)p);
2218    (*p)->print();
2219  }
2220#endif //PRODUCT
2221};
2222
2223bool nmethod::detect_scavenge_root_oops() {
2224  DetectScavengeRoot detect_scavenge_root;
2225  NOT_PRODUCT(if (TraceScavenge)  detect_scavenge_root._print_nm = this);
2226  oops_do(&detect_scavenge_root);
2227  return detect_scavenge_root.detected_scavenge_root();
2228}
2229
2230// Method that knows how to preserve outgoing arguments at call. This method must be
2231// called with a frame corresponding to a Java invoke
2232void nmethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
2233#ifndef SHARK
2234  if (!method()->is_native()) {
2235    SimpleScopeDesc ssd(this, fr.pc());
2236    Bytecode_invoke call(ssd.method(), ssd.bci());
2237    bool has_receiver = call.has_receiver();
2238    bool has_appendix = call.has_appendix();
2239    Symbol* signature = call.signature();
2240    fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
2241  }
2242#endif // !SHARK
2243}
2244
2245
2246oop nmethod::embeddedOop_at(u_char* p) {
2247  RelocIterator iter(this, p, p + 1);
2248  while (iter.next())
2249    if (iter.type() == relocInfo::oop_type) {
2250      return iter.oop_reloc()->oop_value();
2251    }
2252  return NULL;
2253}
2254
2255
2256inline bool includes(void* p, void* from, void* to) {
2257  return from <= p && p < to;
2258}
2259
2260
2261void nmethod::copy_scopes_pcs(PcDesc* pcs, int count) {
2262  assert(count >= 2, "must be sentinel values, at least");
2263
2264#ifdef ASSERT
2265  // must be sorted and unique; we do a binary search in find_pc_desc()
2266  int prev_offset = pcs[0].pc_offset();
2267  assert(prev_offset == PcDesc::lower_offset_limit,
2268         "must start with a sentinel");
2269  for (int i = 1; i < count; i++) {
2270    int this_offset = pcs[i].pc_offset();
2271    assert(this_offset > prev_offset, "offsets must be sorted");
2272    prev_offset = this_offset;
2273  }
2274  assert(prev_offset == PcDesc::upper_offset_limit,
2275         "must end with a sentinel");
2276#endif //ASSERT
2277
2278  // Search for MethodHandle invokes and tag the nmethod.
2279  for (int i = 0; i < count; i++) {
2280    if (pcs[i].is_method_handle_invoke()) {
2281      set_has_method_handle_invokes(true);
2282      break;
2283    }
2284  }
2285  assert(has_method_handle_invokes() == (_deoptimize_mh_offset != -1), "must have deopt mh handler");
2286
2287  int size = count * sizeof(PcDesc);
2288  assert(scopes_pcs_size() >= size, "oob");
2289  memcpy(scopes_pcs_begin(), pcs, size);
2290
2291  // Adjust the final sentinel downward.
2292  PcDesc* last_pc = &scopes_pcs_begin()[count-1];
2293  assert(last_pc->pc_offset() == PcDesc::upper_offset_limit, "sanity");
2294  last_pc->set_pc_offset(content_size() + 1);
2295  for (; last_pc + 1 < scopes_pcs_end(); last_pc += 1) {
2296    // Fill any rounding gaps with copies of the last record.
2297    last_pc[1] = last_pc[0];
2298  }
2299  // The following assert could fail if sizeof(PcDesc) is not
2300  // an integral multiple of oopSize (the rounding term).
2301  // If it fails, change the logic to always allocate a multiple
2302  // of sizeof(PcDesc), and fill unused words with copies of *last_pc.
2303  assert(last_pc + 1 == scopes_pcs_end(), "must match exactly");
2304}
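
// Worked example with illustrative numbers: given count == 4 and room for 5
// PcDescs after size rounding, the incoming offsets
//   [lower_offset_limit, 8, 24, upper_offset_limit]
// are copied, the trailing sentinel is lowered to content_size() + 1, and the
// single rounding gap is filled with a copy of that last record, so the
// search in find_pc_desc() never reads an uninitialized slot.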
2305
2306void nmethod::copy_scopes_data(u_char* buffer, int size) {
2307  assert(scopes_data_size() >= size, "oob");
2308  memcpy(scopes_data_begin(), buffer, size);
2309}
2310
2311
2312#ifdef ASSERT
2313static PcDesc* linear_search(nmethod* nm, int pc_offset, bool approximate) {
2314  PcDesc* lower = nm->scopes_pcs_begin();
2315  PcDesc* upper = nm->scopes_pcs_end();
2316  lower += 1; // exclude initial sentinel
2317  PcDesc* res = NULL;
2318  for (PcDesc* p = lower; p < upper; p++) {
2319    NOT_PRODUCT(--nmethod_stats.pc_desc_tests);  // don't count this call to match_desc
2320    if (match_desc(p, pc_offset, approximate)) {
2321      if (res == NULL)
2322        res = p;
2323      else
2324        res = (PcDesc*) badAddress;
2325    }
2326  }
2327  return res;
2328}
2329#endif
2330
2331
2332// Finds a PcDesc with real-pc equal to "pc"
2333PcDesc* nmethod::find_pc_desc_internal(address pc, bool approximate) {
2334  address base_address = code_begin();
2335  if ((pc < base_address) ||
2336      (pc - base_address) >= (ptrdiff_t) PcDesc::upper_offset_limit) {
2337    return NULL;  // PC is wildly out of range
2338  }
2339  int pc_offset = (int) (pc - base_address);
2340
2341  // Check whether the PcDesc cache contains the desired PcDesc
2342  // (This has an almost 100% hit rate.)
2343  PcDesc* res = _pc_desc_cache.find_pc_desc(pc_offset, approximate);
2344  if (res != NULL) {
2345    assert(res == linear_search(this, pc_offset, approximate), "cache ok");
2346    return res;
2347  }
2348
2349  // Fallback algorithm: quasi-linear search for the PcDesc
2350  // Find the last pc_offset less than the given offset.
2351  // The successor must be the required match, if there is a match at all.
2352  // (Use a fixed radix to avoid expensive affine pointer arithmetic.)
2353  PcDesc* lower = scopes_pcs_begin();
2354  PcDesc* upper = scopes_pcs_end();
2355  upper -= 1; // exclude final sentinel
2356  if (lower >= upper)  return NULL;  // native method; no PcDescs at all
2357
2358#define assert_LU_OK \
2359  /* invariant on lower..upper during the following search: */ \
2360  assert(lower->pc_offset() <  pc_offset, "sanity"); \
2361  assert(upper->pc_offset() >= pc_offset, "sanity")
2362  assert_LU_OK;
2363
2364  // Use the last successful return as a split point.
2365  PcDesc* mid = _pc_desc_cache.last_pc_desc();
2366  NOT_PRODUCT(++nmethod_stats.pc_desc_searches);
2367  if (mid->pc_offset() < pc_offset) {
2368    lower = mid;
2369  } else {
2370    upper = mid;
2371  }
2372
2373  // Take giant steps at first (4096, then 256, then 16, then 1)
2374  const int LOG2_RADIX = 4 /*smaller steps in debug mode:*/ debug_only(-1);
2375  const int RADIX = (1 << LOG2_RADIX);
2376  for (int step = (1 << (LOG2_RADIX*3)); step > 1; step >>= LOG2_RADIX) {
2377    while ((mid = lower + step) < upper) {
2378      assert_LU_OK;
2379      NOT_PRODUCT(++nmethod_stats.pc_desc_searches);
2380      if (mid->pc_offset() < pc_offset) {
2381        lower = mid;
2382      } else {
2383        upper = mid;
2384        break;
2385      }
2386    }
2387    assert_LU_OK;
2388  }
2389
2390  // Sneak up on the value with a linear search of length ~16.
2391  while (true) {
2392    assert_LU_OK;
2393    mid = lower + 1;
2394    NOT_PRODUCT(++nmethod_stats.pc_desc_searches);
2395    if (mid->pc_offset() < pc_offset) {
2396      lower = mid;
2397    } else {
2398      upper = mid;
2399      break;
2400    }
2401  }
2402#undef assert_LU_OK
2403
2404  if (match_desc(upper, pc_offset, approximate)) {
2405    assert(upper == linear_search(this, pc_offset, approximate), "search ok");
2406    _pc_desc_cache.add_pc_desc(upper);
2407    return upper;
2408  } else {
2409    assert(NULL == linear_search(this, pc_offset, approximate), "search ok");
2410    return NULL;
2411  }
2412}
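
// Illustrative cost of the radix search above: in product builds LOG2_RADIX
// is 4, so the step sizes are 4096, 256, and 16, followed by a linear sweep
// of at most ~16 probes. Locating an offset in a table of a few thousand
// PcDescs therefore takes on the order of tens of probes rather than a full
// linear scan.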
2413
2414
2415void nmethod::check_all_dependencies(DepChange& changes) {
2416  // Checked dependencies are allocated into this ResourceMark
2417  ResourceMark rm;
2418
2419  // Turn off dependency tracing while actually testing dependencies.
2420  NOT_PRODUCT( FlagSetting fs(TraceDependencies, false) );
2421
2422  typedef ResourceHashtable<DependencySignature, int, &DependencySignature::hash,
2423                            &DependencySignature::equals, 11027> DepTable;
2424
2425  DepTable* table = new DepTable();
2426
2427  // Iterate over live nmethods and check dependencies of all nmethods that are not
2428  // marked for deoptimization. A particular dependency is only checked once.
2429  NMethodIterator iter;
2430  while(iter.next()) {
2431    nmethod* nm = iter.method();
2432    // Only notify for live nmethods
2433    if (nm->is_alive() && !nm->is_marked_for_deoptimization()) {
2434      for (Dependencies::DepStream deps(nm); deps.next(); ) {
2435        // Construct abstraction of a dependency.
2436        DependencySignature* current_sig = new DependencySignature(deps);
2437
2438        // Determine if dependency is already checked. table->put(...) returns
2439        // 'true' if the dependency is added (i.e., was not in the hashtable).
2440        if (table->put(*current_sig, 1)) {
2441          if (deps.check_dependency() != NULL) {
2442            // Dependency checking failed. Print out information about the failed
2443            // dependency and finally fail with an assert. We can fail here, since
2444            // dependency checking is never done in a product build.
2445            changes.print();
2446            nm->print();
2447            nm->print_dependencies();
2448            assert(false, "Should have been marked for deoptimization");
2449          }
2450        }
2451      }
2452    }
2453  }
2454}
2455
2456bool nmethod::check_dependency_on(DepChange& changes) {
2457  // What has happened:
2458  // 1) a new class dependee has been added
2459  // 2) dependee and all its super classes have been marked
2460  bool found_check = false;  // set true if we are upset
2461  for (Dependencies::DepStream deps(this); deps.next(); ) {
2462    // Evaluate only relevant dependencies.
2463    if (deps.spot_check_dependency_at(changes) != NULL) {
2464      found_check = true;
2465      NOT_DEBUG(break);
2466    }
2467  }
2468  return found_check;
2469}
2470
2471bool nmethod::is_evol_dependent_on(Klass* dependee) {
2472  InstanceKlass *dependee_ik = InstanceKlass::cast(dependee);
2473  Array<Method*>* dependee_methods = dependee_ik->methods();
2474  for (Dependencies::DepStream deps(this); deps.next(); ) {
2475    if (deps.type() == Dependencies::evol_method) {
2476      Method* method = deps.method_argument(0);
2477      for (int j = 0; j < dependee_methods->length(); j++) {
2478        if (dependee_methods->at(j) == method) {
2479          // RC_TRACE macro has an embedded ResourceMark
2480          RC_TRACE(0x01000000,
2481            ("Found evol dependency of nmethod %s.%s(%s) compile_id=%d on method %s.%s(%s)",
2482            _method->method_holder()->external_name(),
2483            _method->name()->as_C_string(),
2484            _method->signature()->as_C_string(), compile_id(),
2485            method->method_holder()->external_name(),
2486            method->name()->as_C_string(),
2487            method->signature()->as_C_string()));
2488          if (TraceDependencies || LogCompilation)
2489            deps.log_dependency(dependee);
2490          return true;
2491        }
2492      }
2493    }
2494  }
2495  return false;
2496}
2497
2498// Called from mark_for_deoptimization, when dependee is invalidated.
2499bool nmethod::is_dependent_on_method(Method* dependee) {
2500  for (Dependencies::DepStream deps(this); deps.next(); ) {
2501    if (deps.type() != Dependencies::evol_method)
2502      continue;
2503    Method* method = deps.method_argument(0);
2504    if (method == dependee) return true;
2505  }
2506  return false;
2507}
2508
2509
2510bool nmethod::is_patchable_at(address instr_addr) {
2511  assert(insts_contains(instr_addr), "wrong nmethod used");
2512  if (is_zombie()) {
2513    // a zombie may never be patched
2514    return false;
2515  }
2516  return true;
2517}
2518
2519
2520address nmethod::continuation_for_implicit_exception(address pc) {
2521  // Exception happened outside inline-cache check code => we are inside
2522  // an active nmethod => use cpc to determine a return address
2523  int exception_offset = pc - code_begin();
2524  int cont_offset = ImplicitExceptionTable(this).at( exception_offset );
2525#ifdef ASSERT
2526  if (cont_offset == 0) {
2527    Thread* thread = ThreadLocalStorage::get_thread_slow();
2528    ResetNoHandleMark rnm; // Might be called from LEAF/QUICK ENTRY
2529    HandleMark hm(thread);
2530    ResourceMark rm(thread);
2531    CodeBlob* cb = CodeCache::find_blob(pc);
2532    assert(cb != NULL && cb == this, "");
2533    tty->print_cr("implicit exception happened at " INTPTR_FORMAT, pc);
2534    print();
2535    method()->print_codes();
2536    print_code();
2537    print_pcs();
2538  }
2539#endif
2540  if (cont_offset == 0) {
2541    // Let the normal error handling report the exception
2542    return NULL;
2543  }
2544  return code_begin() + cont_offset;
2545}
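
// Example of the mapping consulted above (offsets are illustrative): if the
// compiler emitted a load at code offset 0x40 that can take an implicit null
// check, the ImplicitExceptionTable holds an entry (0x40 -> 0x9c); a fault at
// code_begin()+0x40 then resumes at code_begin()+0x9c, where the exception is
// raised explicitly.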
2546
2547
2548
2549void nmethod_init() {
2550  // make sure you didn't forget to adjust the filler fields
2551  assert(sizeof(nmethod) % oopSize == 0, "nmethod size must be multiple of a word");
2552}
2553
2554
2555//-------------------------------------------------------------------------------------------
2556
2557
2558// QQQ might we make this work from a frame??
2559nmethodLocker::nmethodLocker(address pc) {
2560  CodeBlob* cb = CodeCache::find_blob(pc);
2561  guarantee(cb != NULL && cb->is_nmethod(), "bad pc for an nmethod found");
2562  _nm = (nmethod*)cb;
2563  lock_nmethod(_nm);
2564}
2565
2566// Only JvmtiDeferredEvent::compiled_method_unload_event()
2567// should pass zombie_ok == true.
2568void nmethodLocker::lock_nmethod(nmethod* nm, bool zombie_ok) {
2569  if (nm == NULL)  return;
2570  Atomic::inc(&nm->_lock_count);
2571  assert(zombie_ok || !nm->is_zombie(), "cannot lock a zombie method");
2572}
2573
2574void nmethodLocker::unlock_nmethod(nmethod* nm) {
2575  if (nm == NULL)  return;
2576  Atomic::dec(&nm->_lock_count);
2577  assert(nm->_lock_count >= 0, "unmatched nmethod lock/unlock");
2578}
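
// Typical RAII usage (a sketch): keep an nmethod from being flushed across a
// region that may safepoint by holding a locker on the stack, as
// make_not_entrant_or_zombie() does above:
//
//   {
//     nmethodLocker nml(nm);  // increments nm->_lock_count
//     // ... nm cannot be flushed here ...
//   }                         // destructor decrements the count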
2579
2580
2581// -----------------------------------------------------------------------------
2582// nmethod::get_deopt_original_pc
2583//
2584// Return the original PC for the given PC if:
2585// (a) the given PC belongs to an nmethod and
2586// (b) it is a deopt PC
2587address nmethod::get_deopt_original_pc(const frame* fr) {
2588  if (fr->cb() == NULL)  return NULL;
2589
2590  nmethod* nm = fr->cb()->as_nmethod_or_null();
2591  if (nm != NULL && nm->is_deopt_pc(fr->pc()))
2592    return nm->get_original_pc(fr);
2593
2594  return NULL;
2595}
2596
2597
2598// -----------------------------------------------------------------------------
2599// MethodHandle
2600
2601bool nmethod::is_method_handle_return(address return_pc) {
2602  if (!has_method_handle_invokes())  return false;
2603  PcDesc* pd = pc_desc_at(return_pc);
2604  if (pd == NULL)
2605    return false;
2606  return pd->is_method_handle_invoke();
2607}
2608
2609
2610// -----------------------------------------------------------------------------
2611// Verification
2612
2613class VerifyOopsClosure: public OopClosure {
2614  nmethod* _nm;
2615  bool     _ok;
2616public:
2617  VerifyOopsClosure(nmethod* nm) : _nm(nm), _ok(true) { }
2618  bool ok() { return _ok; }
2619  virtual void do_oop(oop* p) {
2620    if ((*p) == NULL || (*p)->is_oop())  return;
2621    if (_ok) {
2622      _nm->print_nmethod(true);
2623      _ok = false;
2624    }
2625    tty->print_cr("*** non-oop "PTR_FORMAT" found at "PTR_FORMAT" (offset %d)",
2626                  (void *)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));
2627  }
2628  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
2629};
2630
2631void nmethod::verify() {
2632
2633  // Hmm. OSR methods can be deopted but not marked as zombie or not_entrant;
2634  // that seems odd.
2635
2636  if( is_zombie() || is_not_entrant() )
2637    return;
2638
2639  // Make sure all the entry points are correctly aligned for patching.
2640  NativeJump::check_verified_entry_alignment(entry_point(), verified_entry_point());
2641
2642  // assert(method()->is_oop(), "must be valid");
2643
2644  ResourceMark rm;
2645
2646  if (!CodeCache::contains(this)) {
2647    fatal(err_msg("nmethod at " INTPTR_FORMAT " not in zone", this));
2648  }
2649
2650  if(is_native_method() )
2651    return;
2652
2653  nmethod* nm = CodeCache::find_nmethod(verified_entry_point());
2654  if (nm != this) {
2655    fatal(err_msg("findNMethod did not find this nmethod (" INTPTR_FORMAT ")",
2656                  this));
2657  }
2658
2659  for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
2660    if (! p->verify(this)) {
2661      tty->print_cr("\t\tin nmethod at " INTPTR_FORMAT " (pcs)", this);
2662    }
2663  }
2664
2665  VerifyOopsClosure voc(this);
2666  oops_do(&voc);
2667  assert(voc.ok(), "embedded oops must be OK");
2668  verify_scavenge_root_oops();
2669
2670  verify_scopes();
2671}
2672
2673
2674void nmethod::verify_interrupt_point(address call_site) {
2675  // Verify IC only when nmethod installation is finished.
2676  bool is_installed = (method()->code() == this) // nmethod is in state 'in_use' and installed
2677                      || !this->is_in_use();     // nmethod is installed, but not in 'in_use' state
2678  if (is_installed) {
2679    Thread *cur = Thread::current();
2680    if (CompiledIC_lock->owner() == cur ||
2681        ((cur->is_VM_thread() || cur->is_ConcurrentGC_thread()) &&
2682         SafepointSynchronize::is_at_safepoint())) {
2683      CompiledIC_at(this, call_site);
2684      CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
2685    } else {
2686      MutexLocker ml_verify (CompiledIC_lock);
2687      CompiledIC_at(this, call_site);
2688    }
2689  }
2690
2691  PcDesc* pd = pc_desc_at(nativeCall_at(call_site)->return_address());
2692  assert(pd != NULL, "PcDesc must exist");
2693  for (ScopeDesc* sd = new ScopeDesc(this, pd->scope_decode_offset(),
2694                                     pd->obj_decode_offset(), pd->should_reexecute(),
2695                                     pd->return_oop());
2696       !sd->is_top(); sd = sd->sender()) {
2697    sd->verify();
2698  }
2699}
2700
2701void nmethod::verify_scopes() {
2702  if( !method() ) return;       // Runtime stubs have no scope
2703  if (method()->is_native()) return; // Ignore stub methods.
2704  // Iterate through all interrupt points
2705  // and verify that the debug information is valid.
2706  RelocIterator iter((nmethod*)this);
2707  while (iter.next()) {
2708    address stub = NULL;
2709    switch (iter.type()) {
2710      case relocInfo::virtual_call_type:
2711        verify_interrupt_point(iter.addr());
2712        break;
2713      case relocInfo::opt_virtual_call_type:
2714        stub = iter.opt_virtual_call_reloc()->static_stub();
2715        verify_interrupt_point(iter.addr());
2716        break;
2717      case relocInfo::static_call_type:
2718        stub = iter.static_call_reloc()->static_stub();
2719        //verify_interrupt_point(iter.addr());
2720        break;
2721      case relocInfo::runtime_call_type:
2722        address destination = iter.reloc()->value();
2723        // Right now there is no way to find out which entries support
2724        // an interrupt point.  It would be nice if we had this
2725        // information in a table.
2726        break;
2727    }
2728    assert(stub == NULL || stub_contains(stub), "static call stub outside stub section");
2729  }
2730}
2731
2732
2733// -----------------------------------------------------------------------------
2734// Non-product code
2735#ifndef PRODUCT
2736
2737class DebugScavengeRoot: public OopClosure {
2738  nmethod* _nm;
2739  bool     _ok;
2740public:
2741  DebugScavengeRoot(nmethod* nm) : _nm(nm), _ok(true) { }
2742  bool ok() { return _ok; }
2743  virtual void do_oop(oop* p) {
2744    if ((*p) == NULL || !(*p)->is_scavengable())  return;
2745    if (_ok) {
2746      _nm->print_nmethod(true);
2747      _ok = false;
2748    }
2749    tty->print_cr("*** scavengable oop "PTR_FORMAT" found at "PTR_FORMAT" (offset %d)",
2750                  (void *)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));
2751    (*p)->print();
2752  }
2753  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
2754};
2755
2756void nmethod::verify_scavenge_root_oops() {
2757  if (UseG1GC) {
2758    return;
2759  }
2760
2761  if (!on_scavenge_root_list()) {
2762    // Actually look inside, to verify the claim that it's clean.
2763    DebugScavengeRoot debug_scavenge_root(this);
2764    oops_do(&debug_scavenge_root);
2765    if (!debug_scavenge_root.ok())
2766      fatal("found an unadvertised bad scavengable oop in the code cache");
2767  }
2768  assert(scavenge_root_not_marked(), "");
2769}
2770
2771#endif // PRODUCT
2772
2773// Printing operations
2774
2775void nmethod::print() const {
2776  ResourceMark rm;
2777  ttyLocker ttyl;   // keep the following output all in one block
2778
2779  tty->print("Compiled method ");
2780
2781  if (is_compiled_by_c1()) {
2782    tty->print("(c1) ");
2783  } else if (is_compiled_by_c2()) {
2784    tty->print("(c2) ");
2785  } else if (is_compiled_by_shark()) {
2786    tty->print("(shark) ");
2787  } else {
2788    tty->print("(nm) ");
2789  }
2790
2791  print_on(tty, NULL);
2792
2793  if (WizardMode) {
2794    tty->print("((nmethod*) "INTPTR_FORMAT ") ", this);
2795    tty->print(" for method " INTPTR_FORMAT , (address)method());
2796    tty->print(" { ");
2797    if (is_in_use())      tty->print("in_use ");
2798    if (is_not_entrant()) tty->print("not_entrant ");
2799    if (is_zombie())      tty->print("zombie ");
2800    if (is_unloaded())    tty->print("unloaded ");
2801    if (on_scavenge_root_list())  tty->print("scavenge_root ");
2802    tty->print_cr("}:");
2803  }
2804  if (size              () > 0) tty->print_cr(" total in heap  [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2805                                              (address)this,
2806                                              (address)this + size(),
2807                                              size());
2808  if (relocation_size   () > 0) tty->print_cr(" relocation     [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2809                                              relocation_begin(),
2810                                              relocation_end(),
2811                                              relocation_size());
2812  if (consts_size       () > 0) tty->print_cr(" constants      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2813                                              consts_begin(),
2814                                              consts_end(),
2815                                              consts_size());
2816  if (insts_size        () > 0) tty->print_cr(" main code      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2817                                              insts_begin(),
2818                                              insts_end(),
2819                                              insts_size());
2820  if (stub_size         () > 0) tty->print_cr(" stub code      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2821                                              stub_begin(),
2822                                              stub_end(),
2823                                              stub_size());
2824  if (oops_size         () > 0) tty->print_cr(" oops           [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2825                                              oops_begin(),
2826                                              oops_end(),
2827                                              oops_size());
2828  if (metadata_size      () > 0) tty->print_cr(" metadata       [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2829                                              metadata_begin(),
2830                                              metadata_end(),
2831                                              metadata_size());
2832  if (scopes_data_size  () > 0) tty->print_cr(" scopes data    [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2833                                              scopes_data_begin(),
2834                                              scopes_data_end(),
2835                                              scopes_data_size());
2836  if (scopes_pcs_size   () > 0) tty->print_cr(" scopes pcs     [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2837                                              scopes_pcs_begin(),
2838                                              scopes_pcs_end(),
2839                                              scopes_pcs_size());
2840  if (dependencies_size () > 0) tty->print_cr(" dependencies   [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2841                                              dependencies_begin(),
2842                                              dependencies_end(),
2843                                              dependencies_size());
2844  if (handler_table_size() > 0) tty->print_cr(" handler table  [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2845                                              handler_table_begin(),
2846                                              handler_table_end(),
2847                                              handler_table_size());
2848  if (nul_chk_table_size() > 0) tty->print_cr(" nul chk table  [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2849                                              nul_chk_table_begin(),
2850                                              nul_chk_table_end(),
2851                                              nul_chk_table_size());
2852}
2853
2854void nmethod::print_code() {
2855  HandleMark hm;
2856  ResourceMark m;
2857  Disassembler::decode(this);
2858}
2859
2860
2861#ifndef PRODUCT
2862
2863void nmethod::print_scopes() {
2864  // Find the first pc desc for all scopes in the code and print it.
2865  ResourceMark rm;
2866  for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
2867    if (p->scope_decode_offset() == DebugInformationRecorder::serialized_null)
2868      continue;
2869
2870    ScopeDesc* sd = scope_desc_at(p->real_pc(this));
2871    sd->print_on(tty, p);
2872  }
2873}
2874
2875void nmethod::print_dependencies() {
2876  ResourceMark rm;
2877  ttyLocker ttyl;   // keep the following output all in one block
2878  tty->print_cr("Dependencies:");
2879  for (Dependencies::DepStream deps(this); deps.next(); ) {
2880    deps.print_dependency();
2881    Klass* ctxk = deps.context_type();
2882    if (ctxk != NULL) {
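      // Mark context klasses that actually record this nmethod on their
      // dependent-nmethod list (checked via is_dependent_nmethod below).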
2883      if (ctxk->oop_is_instance() && ((InstanceKlass*)ctxk)->is_dependent_nmethod(this)) {
2884        tty->print_cr("   [nmethod<=klass]%s", ctxk->external_name());
2885      }
2886    }
2887    deps.log_dependency();  // put it into the xml log also
2888  }
2889}
2890
2891
2892void nmethod::print_relocations() {
2893  ResourceMark m;       // in case methods get printed via the debugger
2894  tty->print_cr("relocations:");
2895  RelocIterator iter(this);
2896  iter.print();
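  // With UseRelocIndex, the tail of the relocation section holds an index:
  // the final word is the index size in bytes, preceded by
  // (code offset, relocation offset) pairs, which the loop below decodes.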
2897  if (UseRelocIndex) {
2898    jint* index_end   = (jint*)relocation_end() - 1;
2899    jint  index_size  = *index_end;
2900    jint* index_start = (jint*)( (address)index_end - index_size );
2901    tty->print_cr("    index @" INTPTR_FORMAT ": index_size=%d", index_start, index_size);
2902    if (index_size > 0) {
2903      jint* ip;
2904      for (ip = index_start; ip+2 <= index_end; ip += 2)
2905        tty->print_cr("  (%d %d) addr=" INTPTR_FORMAT " @" INTPTR_FORMAT,
2906                      ip[0],
2907                      ip[1],
2908                      header_end()+ip[0],
2909                      relocation_begin()-1+ip[1]);
2910      for (; ip < index_end; ip++)
2911        tty->print_cr("  (%d ?)", ip[0]);
2912      tty->print_cr("          @" INTPTR_FORMAT ": index_size=%d", ip, *ip);
2913      ip++;
2914      tty->print_cr("reloc_end @" INTPTR_FORMAT ":", ip);
2915    }
2916  }
2917}
2918
2919
2920void nmethod::print_pcs() {
2921  ResourceMark m;       // in case methods get printed via debugger
2922  tty->print_cr("pc-bytecode offsets:");
2923  for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
2924    p->print(this);
2925  }
2926}
2927
2928#endif // PRODUCT
2929
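// Describe the relocations in the given code range for the code-comment
// printer.  Returns NULL when no relocation is present, a type-specific
// string for the first one recognized, and "other" as a fallback.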
2930const char* nmethod::reloc_string_for(u_char* begin, u_char* end) {
2931  RelocIterator iter(this, begin, end);
2932  bool have_one = false;
2933  while (iter.next()) {
2934    have_one = true;
2935    switch (iter.type()) {
2936        case relocInfo::none:                  return "no_reloc";
2937        case relocInfo::oop_type: {
2938          stringStream st;
2939          oop_Relocation* r = iter.oop_reloc();
2940          oop obj = r->oop_value();
2941          st.print("oop(");
2942          if (obj == NULL) st.print("NULL");
2943          else obj->print_value_on(&st);
2944          st.print(")");
2945          return st.as_string();
2946        }
2947        case relocInfo::metadata_type: {
2948          stringStream st;
2949          metadata_Relocation* r = iter.metadata_reloc();
2950          Metadata* obj = r->metadata_value();
2951          st.print("metadata(");
2952          if (obj == NULL) st.print("NULL");
2953          else obj->print_value_on(&st);
2954          st.print(")");
2955          return st.as_string();
2956        }
2957        case relocInfo::virtual_call_type:     return "virtual_call";
2958        case relocInfo::opt_virtual_call_type: return "optimized virtual_call";
2959        case relocInfo::static_call_type:      return "static_call";
2960        case relocInfo::static_stub_type:      return "static_stub";
2961        case relocInfo::runtime_call_type:     return "runtime_call";
2962        case relocInfo::external_word_type:    return "external_word";
2963        case relocInfo::internal_word_type:    return "internal_word";
2964        case relocInfo::section_word_type:     return "section_word";
2965        case relocInfo::poll_type:             return "poll";
2966        case relocInfo::poll_return_type:      return "poll_return";
2967        case relocInfo::type_mask:             return "type_bit_mask";
2968    }
2969  }
2970  return have_one ? "other" : NULL;
2971}
2972
2973// Return the last scope in (begin..end]
2974ScopeDesc* nmethod::scope_desc_in(address begin, address end) {
2975  PcDesc* p = pc_desc_near(begin+1);
2976  if (p != NULL && p->real_pc(this) <= end) {
2977    return new ScopeDesc(this, p->scope_decode_offset(),
2978                         p->obj_decode_offset(), p->should_reexecute(),
2979                         p->return_oop());
2980  }
2981  return NULL;
2982}
2983
2984void nmethod::print_nmethod_labels(outputStream* stream, address block_begin) const {
2985  if (block_begin == entry_point())             stream->print_cr("[Entry Point]");
2986  if (block_begin == verified_entry_point())    stream->print_cr("[Verified Entry Point]");
2987  if (block_begin == exception_begin())         stream->print_cr("[Exception Handler]");
2988  if (block_begin == stub_begin())              stream->print_cr("[Stub Code]");
2989  if (block_begin == deopt_handler_begin())     stream->print_cr("[Deopt Handler Code]");
2990
2991  if (has_method_handle_invokes() &&
2992      block_begin == deopt_mh_handler_begin())  stream->print_cr("[Deopt MH Handler Code]");
2993
2994  if (block_begin == consts_begin())            stream->print_cr("[Constants]");
2995
2996  if (block_begin == entry_point()) {
2997    methodHandle m = method();
2998    if (m.not_null()) {
2999      stream->print("  # ");
3000      m->print_value_on(stream);
3001      stream->cr();
3002    }
3003    if (m.not_null() && !is_osr_method()) {
3004      ResourceMark rm;
3005      int sizeargs = m->size_of_parameters();
3006      BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sizeargs);
3007      VMRegPair* regs   = NEW_RESOURCE_ARRAY(VMRegPair, sizeargs);
3008      {
3009        int sig_index = 0;
3010        if (!m->is_static())
3011          sig_bt[sig_index++] = T_OBJECT; // 'this'
3012        for (SignatureStream ss(m->signature()); !ss.at_return_type(); ss.next()) {
3013          BasicType t = ss.type();
3014          sig_bt[sig_index++] = t;
3015          if (type2size[t] == 2) {
3016            sig_bt[sig_index++] = T_VOID;
3017          } else {
3018            assert(type2size[t] == 1, "size is 1 or 2");
3019          }
3020        }
3021        assert(sig_index == sizeargs, "");
3022      }
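      // Illustrative example (hypothetical signature): for a virtual method
      // "long f(int, long)", sig_bt becomes
      //   { T_OBJECT /*this*/, T_INT, T_LONG, T_VOID }
      // because each two-slot type claims a second, T_VOID-marked slot.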
3023      const char* spname = "sp"; // make arch-specific?
3024      intptr_t out_preserve = SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs, false);
3025      int stack_slot_offset = this->frame_size() * wordSize;
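      // Incoming stack slots are numbered from the caller's sp, so the full
      // frame size is added below to express them relative to our own sp.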
3026      int tab1 = 14, tab2 = 24;
3027      int sig_index = 0;
3028      int arg_index = (m->is_static() ? 0 : -1);
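      // arg_index == -1 stands for the implicit 'this' argument; the
      // signature stream is advanced only for explicit parameters
      // (see the bottom of the loop).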
3029      bool did_old_sp = false;
3030      for (SignatureStream ss(m->signature()); !ss.at_return_type(); ) {
3031        bool at_this = (arg_index == -1);
3032        bool at_old_sp = false;
3033        BasicType t = (at_this ? T_OBJECT : ss.type());
3034        assert(t == sig_bt[sig_index], "sigs in sync");
3035        if (at_this)
3036          stream->print("  # this: ");
3037        else
3038          stream->print("  # parm%d: ", arg_index);
3039        stream->move_to(tab1);
3040        VMReg fst = regs[sig_index].first();
3041        VMReg snd = regs[sig_index].second();
3042        if (fst->is_reg()) {
3043          stream->print("%s", fst->name());
3044          if (snd->is_valid())  {
3045            stream->print(":%s", snd->name());
3046          }
3047        } else if (fst->is_stack()) {
3048          int offset = fst->reg2stack() * VMRegImpl::stack_slot_size + stack_slot_offset;
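          // A slot exactly frame_size words above our sp coincides with the
          // caller's sp; flag it so it can be annotated below.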
3049          if (offset == stack_slot_offset)  at_old_sp = true;
3050          stream->print("[%s+0x%x]", spname, offset);
3051        } else {
3052          stream->print("reg%d:%d??", (int)(intptr_t)fst, (int)(intptr_t)snd);
3053        }
3054        stream->print(" ");
3055        stream->move_to(tab2);
3056        stream->print("= ");
3057        if (at_this) {
3058          m->method_holder()->print_value_on(stream);
3059        } else {
3060          bool did_name = false;
3061          if (ss.is_object()) {  // at_this is always false in this branch
3062            Symbol* name = ss.as_symbol_or_null();
3063            if (name != NULL) {
3064              name->print_value_on(stream);
3065              did_name = true;
3066            }
3067          }
3068          if (!did_name)
3069            stream->print("%s", type2name(t));
3070        }
3071        if (at_old_sp) {
3072          stream->print("  (%s of caller)", spname);
3073          did_old_sp = true;
3074        }
3075        stream->cr();
3076        sig_index += type2size[t];
3077        arg_index += 1;
3078        if (!at_this)  ss.next();
3079      }
3080      if (!did_old_sp) {
3081        stream->print("  # ");
3082        stream->move_to(tab1);
3083        stream->print("[%s+0x%x]", spname, stack_slot_offset);
3084        stream->print("  (%s of caller)", spname);
3085        stream->cr();
3086      }
3087    }
3088  }
3089}
3090
3091void nmethod::print_code_comment_on(outputStream* st, int column, u_char* begin, u_char* end) {
3092  // First, find an oopmap in (begin, end].
3093  // We use the odd half-closed interval so that oop maps and scope descs
3094  // which are tied to the byte after a call are printed with the call itself.
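  // Example: an oop map recorded at a call's return address ra satisfies
  // ra <= end for the interval ending at ra (printed with the call), but
  // fails ra > begin for the interval starting at ra (not printed twice).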
3095  address base = code_begin();
3096  OopMapSet* oms = oop_maps();
3097  if (oms != NULL) {
3098    for (int i = 0, imax = oms->size(); i < imax; i++) {
3099      OopMap* om = oms->at(i);
3100      address pc = base + om->offset();
3101      if (pc > begin) {
3102        if (pc <= end) {
3103          st->move_to(column);
3104          st->print("; ");
3105          om->print_on(st);
3106        }
3107        break;
3108      }
3109    }
3110  }
3111
3112  // Print any debug info present at this pc.
3113  ScopeDesc* sd  = scope_desc_in(begin, end);
3114  if (sd != NULL) {
3115    st->move_to(column);
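    // SynchronizationEntryBCI is a pseudo-bci tagging the method-entry
    // synchronization code, so there is no bytecode to name in that case.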
3116    if (sd->bci() == SynchronizationEntryBCI) {
3117      st->print(";*synchronization entry");
3118    } else {
3119      if (sd->method() == NULL) {
3120        st->print("method is NULL");
3121      } else if (sd->method()->is_native()) {
3122        st->print("method is native");
3123      } else {
3124        Bytecodes::Code bc = sd->method()->java_code_at(sd->bci());
3125        st->print(";*%s", Bytecodes::name(bc));
3126        switch (bc) {
3127        case Bytecodes::_invokevirtual:
3128        case Bytecodes::_invokespecial:
3129        case Bytecodes::_invokestatic:
3130        case Bytecodes::_invokeinterface:
3131          {
3132            Bytecode_invoke invoke(sd->method(), sd->bci());
3133            st->print(" ");
3134            if (invoke.name() != NULL)
3135              invoke.name()->print_symbol_on(st);
3136            else
3137              st->print("<UNKNOWN>");
3138            break;
3139          }
3140        case Bytecodes::_getfield:
3141        case Bytecodes::_putfield:
3142        case Bytecodes::_getstatic:
3143        case Bytecodes::_putstatic:
3144          {
3145            Bytecode_field field(sd->method(), sd->bci());
3146            st->print(" ");
3147            if (field.name() != NULL)
3148              field.name()->print_symbol_on(st);
3149            else
3150              st->print("<UNKNOWN>");
3151          }
3152        }
3153      }
3154    }
3155
3156    // Print all scopes
3157    for (;sd != NULL; sd = sd->sender()) {
3158      st->move_to(column);
3159      st->print("; -");
3160      if (sd->method() == NULL) {  // guard: do not dereference a NULL method below
3161        st->print("method is NULL");
3162      } else {
3163        sd->method()->print_short_name(st);
3164        int lineno = sd->method()->line_number_from_bci(sd->bci());
3165        if (lineno != -1) {
3166          st->print("@%d (line %d)", sd->bci(), lineno);
3167        } else {
3168          st->print("@%d", sd->bci());
3169        }
3170      }
3171      st->cr();
3172    }
3173  }
3174
3175  // Print relocation information
3176  const char* str = reloc_string_for(begin, end);
3177  if (str != NULL) {
3178    if (sd != NULL) st->cr();
3179    st->move_to(column);
3180    st->print(";   {%s}", str);
3181  }
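  // A continuation offset of 0 means the implicit-exception table has no
  // entry for this pc, so nothing is printed in that case.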
3182  int cont_offset = ImplicitExceptionTable(this).at(begin - code_begin());
3183  if (cont_offset != 0) {
3184    st->move_to(column);
3185    st->print("; implicit exception: dispatches to " INTPTR_FORMAT, code_begin() + cont_offset);
3186  }
3187
3188}
3189
3190#ifndef PRODUCT
3191
3192void nmethod::print_value_on(outputStream* st) const {
3193  st->print("nmethod");
3194  print_on(st, NULL);
3195}
3196
3197void nmethod::print_calls(outputStream* st) {
3198  RelocIterator iter(this);
3199  while (iter.next()) {
3200    switch (iter.type()) {
3201    case relocInfo::virtual_call_type:
3202    case relocInfo::opt_virtual_call_type: {
3203      VerifyMutexLocker mc(CompiledIC_lock);
3204      CompiledIC_at(&iter)->print();
3205      break;
3206    }
3207    case relocInfo::static_call_type:
3208      st->print_cr("Static call at " INTPTR_FORMAT, iter.reloc()->addr());
3209      compiledStaticCall_at(iter.reloc())->print();
3210      break;
3211    }
3212  }
3213}
3214
3215void nmethod::print_handler_table() {
3216  ExceptionHandlerTable(this).print();
3217}
3218
3219void nmethod::print_nul_chk_table() {
3220  ImplicitExceptionTable(this).print(code_begin());
3221}
3222
3223void nmethod::print_statistics() {
3224  ttyLocker ttyl;
3225  if (xtty != NULL)  xtty->head("statistics type='nmethod'");
3226  nmethod_stats.print_native_nmethod_stats();
3227  nmethod_stats.print_nmethod_stats();
3228  DebugInformationRecorder::print_statistics();
3229  nmethod_stats.print_pc_stats();
3230  Dependencies::print_statistics();
3231  if (xtty != NULL)  xtty->tail("statistics");
3232}
3233
3234#endif // PRODUCT
3235