nmethod.cpp revision 6019:28f281e8de1d
/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/dependencies.hpp"
#include "code/nmethod.hpp"
#include "code/scopeDesc.hpp"
#include "compiler/abstractCompiler.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compileLog.hpp"
#include "compiler/compilerOracle.hpp"
#include "compiler/disassembler.hpp"
#include "interpreter/bytecode.hpp"
#include "oops/methodData.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "prims/jvmtiImpl.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/sweeper.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/xmlstream.hpp"
#ifdef SHARK
#include "shark/sharkCompiler.hpp"
#endif

#ifdef DTRACE_ENABLED

// Only bother with this argument setup if dtrace is available

#define DTRACE_METHOD_UNLOAD_PROBE(method)                                \
  {                                                                       \
    Method* m = (method);                                                 \
    if (m != NULL) {                                                      \
      Symbol* klass_name = m->klass_name();                               \
      Symbol* name = m->name();                                           \
      Symbol* signature = m->signature();                                 \
      HOTSPOT_COMPILED_METHOD_UNLOAD(                                     \
        (char *) klass_name->bytes(), klass_name->utf8_length(),          \
        (char *) name->bytes(), name->utf8_length(),                      \
        (char *) signature->bytes(), signature->utf8_length());           \
    }                                                                     \
  }

#else //  ndef DTRACE_ENABLED

#define DTRACE_METHOD_UNLOAD_PROBE(method)

#endif

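// Compiler identity predicates.  compiler() is NULL for native method
// wrappers, in which case all of these return false.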
bool nmethod::is_compiled_by_c1() const {
  if (compiler() == NULL) {
    return false;
  }
  return compiler()->is_c1();
}
bool nmethod::is_compiled_by_c2() const {
  if (compiler() == NULL) {
    return false;
  }
  return compiler()->is_c2();
}
bool nmethod::is_compiled_by_shark() const {
  if (compiler() == NULL) {
    return false;
  }
  return compiler()->is_shark();
}



//---------------------------------------------------------------------------------
// NMethod statistics
// They are printed under various flags, including:
//   PrintC1Statistics, PrintOptoStatistics, LogVMOutput, and LogCompilation.
// (In the latter two cases, they, like other stats, are printed to the log only.)

#ifndef PRODUCT
// These variables are put into one block to reduce relocations
// and make it simpler to print from the debugger.
static
struct nmethod_stats_struct {
  int nmethod_count;
  int total_size;
  int relocation_size;
  int consts_size;
  int insts_size;
  int stub_size;
  int scopes_data_size;
  int scopes_pcs_size;
  int dependencies_size;
  int handler_table_size;
  int nul_chk_table_size;
  int oops_size;

  void note_nmethod(nmethod* nm) {
    nmethod_count += 1;
    total_size          += nm->size();
    relocation_size     += nm->relocation_size();
    consts_size         += nm->consts_size();
    insts_size          += nm->insts_size();
    stub_size           += nm->stub_size();
    oops_size           += nm->oops_size();
    scopes_data_size    += nm->scopes_data_size();
    scopes_pcs_size     += nm->scopes_pcs_size();
    dependencies_size   += nm->dependencies_size();
    handler_table_size  += nm->handler_table_size();
    nul_chk_table_size  += nm->nul_chk_table_size();
  }
  void print_nmethod_stats() {
    if (nmethod_count == 0)  return;
    tty->print_cr("Statistics for %d bytecoded nmethods:", nmethod_count);
    if (total_size != 0)          tty->print_cr(" total in heap  = %d", total_size);
    if (relocation_size != 0)     tty->print_cr(" relocation     = %d", relocation_size);
    if (consts_size != 0)         tty->print_cr(" constants      = %d", consts_size);
    if (insts_size != 0)          tty->print_cr(" main code      = %d", insts_size);
    if (stub_size != 0)           tty->print_cr(" stub code      = %d", stub_size);
    if (oops_size != 0)           tty->print_cr(" oops           = %d", oops_size);
    if (scopes_data_size != 0)    tty->print_cr(" scopes data    = %d", scopes_data_size);
    if (scopes_pcs_size != 0)     tty->print_cr(" scopes pcs     = %d", scopes_pcs_size);
    if (dependencies_size != 0)   tty->print_cr(" dependencies   = %d", dependencies_size);
    if (handler_table_size != 0)  tty->print_cr(" handler table  = %d", handler_table_size);
    if (nul_chk_table_size != 0)  tty->print_cr(" nul chk table  = %d", nul_chk_table_size);
  }

  int native_nmethod_count;
  int native_total_size;
  int native_relocation_size;
  int native_insts_size;
  int native_oops_size;
  void note_native_nmethod(nmethod* nm) {
    native_nmethod_count += 1;
    native_total_size       += nm->size();
    native_relocation_size  += nm->relocation_size();
    native_insts_size       += nm->insts_size();
    native_oops_size        += nm->oops_size();
  }
  void print_native_nmethod_stats() {
    if (native_nmethod_count == 0)  return;
    tty->print_cr("Statistics for %d native nmethods:", native_nmethod_count);
    if (native_total_size != 0)       tty->print_cr(" N. total size  = %d", native_total_size);
    if (native_relocation_size != 0)  tty->print_cr(" N. relocation  = %d", native_relocation_size);
    if (native_insts_size != 0)       tty->print_cr(" N. main code   = %d", native_insts_size);
    if (native_oops_size != 0)        tty->print_cr(" N. oops        = %d", native_oops_size);
  }

  int pc_desc_resets;   // number of resets (= number of caches)
  int pc_desc_queries;  // queries to nmethod::find_pc_desc
  int pc_desc_approx;   // number of those which have approximate true
  int pc_desc_repeats;  // number of _pc_descs[0] hits
  int pc_desc_hits;     // number of LRU cache hits
  int pc_desc_tests;    // total number of PcDesc examinations
  int pc_desc_searches; // total number of quasi-binary search steps
  int pc_desc_adds;     // number of LRU cache insertions

  void print_pc_stats() {
    tty->print_cr("PcDesc Statistics:  %d queries, %.2f comparisons per query",
                  pc_desc_queries,
                  (double)(pc_desc_tests + pc_desc_searches)
                  / pc_desc_queries);
    tty->print_cr("  caches=%d queries=%d/%d, hits=%d+%d, tests=%d+%d, adds=%d",
                  pc_desc_resets,
                  pc_desc_queries, pc_desc_approx,
                  pc_desc_repeats, pc_desc_hits,
                  pc_desc_tests, pc_desc_searches, pc_desc_adds);
  }
} nmethod_stats;
#endif //PRODUCT


//---------------------------------------------------------------------------------


ExceptionCache::ExceptionCache(Handle exception, address pc, address handler) {
  assert(pc != NULL, "Must be non null");
  assert(exception.not_null(), "Must be non null");
  assert(handler != NULL, "Must be non null");

  _count = 0;
  _exception_type = exception->klass();
  _next = NULL;

  add_address_and_handler(pc,handler);
}


address ExceptionCache::match(Handle exception, address pc) {
  assert(pc != NULL,"Must be non null");
  assert(exception.not_null(),"Must be non null");
  if (exception->klass() == exception_type()) {
    return (test_address(pc));
  }

  return NULL;
}


bool ExceptionCache::match_exception_with_space(Handle exception) {
  assert(exception.not_null(),"Must be non null");
  if (exception->klass() == exception_type() && count() < cache_size) {
    return true;
  }
  return false;
}


address ExceptionCache::test_address(address addr) {
  for (int i=0; i<count(); i++) {
    if (pc_at(i) == addr) {
      return handler_at(i);
    }
  }
  return NULL;
}


bool ExceptionCache::add_address_and_handler(address addr, address handler) {
  if (test_address(addr) == handler) return true;
  if (count() < cache_size) {
    set_pc_at(count(),addr);
    set_handler_at(count(), handler);
    increment_count();
    return true;
  }
  return false;
}


// Private method for handling the exception cache; it is used to
// manipulate the cache entries directly.
ExceptionCache* nmethod::exception_cache_entry_for_exception(Handle exception) {
  ExceptionCache* ec = exception_cache();
  while (ec != NULL) {
    if (ec->match_exception_with_space(exception)) {
      return ec;
    }
    ec = ec->next();
  }
  return NULL;
}


//-----------------------------------------------------------------------------


// Helper used by both find_pc_desc methods.
static inline bool match_desc(PcDesc* pc, int pc_offset, bool approximate) {
  NOT_PRODUCT(++nmethod_stats.pc_desc_tests);
  if (!approximate)
    return pc->pc_offset() == pc_offset;
  else
    return (pc-1)->pc_offset() < pc_offset && pc_offset <= pc->pc_offset();
}

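// Fill the cache with the given PcDesc so that every slot holds a benign,
// non-NULL sentinel entry (or NULL for native methods, which have no
// PcDescs at all).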
void PcDescCache::reset_to(PcDesc* initial_pc_desc) {
  if (initial_pc_desc == NULL) {
    _pc_descs[0] = NULL; // native method; no PcDescs at all
    return;
  }
  NOT_PRODUCT(++nmethod_stats.pc_desc_resets);
  // reset the cache by filling it with benign (non-null) values
  assert(initial_pc_desc->pc_offset() < 0, "must be sentinel");
  for (int i = 0; i < cache_size; i++)
    _pc_descs[i] = initial_pc_desc;
}

PcDesc* PcDescCache::find_pc_desc(int pc_offset, bool approximate) {
  NOT_PRODUCT(++nmethod_stats.pc_desc_queries);
  NOT_PRODUCT(if (approximate) ++nmethod_stats.pc_desc_approx);

  // Note: one might think that caching the most recently
  // read value separately would be a win, but one would be
  // wrong.  When many threads are updating it, the cache
  // line it's in would bounce between caches, negating
  // any benefit.

  // In order to prevent race conditions do not load cache elements
  // repeatedly, but use a local copy:
  PcDesc* res;

  // Step one:  Check the most recently added value.
  res = _pc_descs[0];
  if (res == NULL) return NULL;  // native method; no PcDescs at all
  if (match_desc(res, pc_offset, approximate)) {
    NOT_PRODUCT(++nmethod_stats.pc_desc_repeats);
    return res;
  }

  // Step two:  Check the rest of the LRU cache.
  for (int i = 1; i < cache_size; ++i) {
    res = _pc_descs[i];
    if (res->pc_offset() < 0) break;  // optimization: skip empty cache
    if (match_desc(res, pc_offset, approximate)) {
      NOT_PRODUCT(++nmethod_stats.pc_desc_hits);
      return res;
    }
  }

  // Report failure.
  return NULL;
}

void PcDescCache::add_pc_desc(PcDesc* pc_desc) {
  NOT_PRODUCT(++nmethod_stats.pc_desc_adds);
  // Update the LRU cache by shifting pc_desc forward.
  for (int i = 0; i < cache_size; i++)  {
    PcDesc* next = _pc_descs[i];
    _pc_descs[i] = pc_desc;
    pc_desc = next;
  }
}

// adjust pcs_size so that it is a multiple of both oopSize and
// sizeof(PcDesc) (assumes that if sizeof(PcDesc) is not a multiple
// of oopSize, then 2*sizeof(PcDesc) is)
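// Worked example with hypothetical sizes: if oopSize == 8 and
// sizeof(PcDesc) == 12, then adjust_pcs_size(36) first rounds 36 up to 40,
// which is not a multiple of 12, so it returns 36 + 12 = 48 -- a multiple
// of both 8 and 12.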
static int adjust_pcs_size(int pcs_size) {
  int nsize = round_to(pcs_size,   oopSize);
  if ((nsize % sizeof(PcDesc)) != 0) {
    nsize = pcs_size + sizeof(PcDesc);
  }
  assert((nsize % oopSize) == 0, "correct alignment");
  return nsize;
}

//-----------------------------------------------------------------------------


void nmethod::add_exception_cache_entry(ExceptionCache* new_entry) {
  assert(ExceptionCache_lock->owned_by_self(),"Must hold the ExceptionCache_lock");
  assert(new_entry != NULL,"Must be non null");
  assert(new_entry->next() == NULL, "Must be null");

  if (exception_cache() != NULL) {
    new_entry->set_next(exception_cache());
  }
  set_exception_cache(new_entry);
}

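// Unlink ec from the exception cache's singly linked list and delete it;
// the entry must be present (asserted below).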
void nmethod::remove_from_exception_cache(ExceptionCache* ec) {
  ExceptionCache* prev = NULL;
  ExceptionCache* curr = exception_cache();
  assert(curr != NULL, "nothing to remove");
  // find the previous and next entry of ec
  while (curr != ec) {
    prev = curr;
    curr = curr->next();
    assert(curr != NULL, "ExceptionCache not found");
  }
  // now: curr == ec
  ExceptionCache* next = curr->next();
  if (prev == NULL) {
    set_exception_cache(next);
  } else {
    prev->set_next(next);
  }
  delete curr;
}


// Public methods for accessing the exception cache.
address nmethod::handler_for_exception_and_pc(Handle exception, address pc) {
  // We never grab a lock to read the exception cache, so we may
  // have false negatives. This is okay, as it can only happen during
  // the first few exception lookups for a given nmethod.
  ExceptionCache* ec = exception_cache();
  while (ec != NULL) {
    address ret_val;
    if ((ret_val = ec->match(exception,pc)) != NULL) {
      return ret_val;
    }
    ec = ec->next();
  }
  return NULL;
}


void nmethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
  // There are potential race conditions during exception cache updates, so we
  // must own the ExceptionCache_lock before doing ANY modifications. Because
  // we don't lock during reads, it is possible to have several threads attempt
  // to update the cache with the same data. We need to check for already inserted
  // copies of the current data before adding it.

  MutexLocker ml(ExceptionCache_lock);
  ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);

  if (target_entry == NULL || !target_entry->add_address_and_handler(pc,handler)) {
    target_entry = new ExceptionCache(exception,pc,handler);
    add_exception_cache_entry(target_entry);
  }
}


//-------------end of code for ExceptionCache--------------


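// Sum of the sizes of the sections that hold this nmethod's contents
// (constants, instructions, stubs, scopes data/pcs, and the handler and
// null-check tables).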
int nmethod::total_size() const {
  return
    consts_size()        +
    insts_size()         +
    stub_size()          +
    scopes_data_size()   +
    scopes_pcs_size()    +
    handler_table_size() +
    nul_chk_table_size();
}

const char* nmethod::compile_kind() const {
  if (is_osr_method())     return "osr";
  if (method() != NULL && is_native_method())  return "c2n";
  return NULL;
}

// Fill in default values for various flag fields
void nmethod::init_defaults() {
  _state                      = in_use;
  _marked_for_reclamation     = 0;
  _has_flushed_dependencies   = 0;
  _has_unsafe_access          = 0;
  _has_method_handle_invokes  = 0;
  _lazy_critical_native       = 0;
  _has_wide_vectors           = 0;
  _marked_for_deoptimization  = 0;
  _lock_count                 = 0;
  _stack_traversal_mark       = 0;
  _unload_reported            = false;           // jvmti state

#ifdef ASSERT
  _oops_are_stale             = false;
#endif

  _oops_do_mark_link       = NULL;
  _jmethod_id              = NULL;
  _osr_link                = NULL;
  _scavenge_root_link      = NULL;
  _scavenge_root_state     = 0;
  _compiler                = NULL;

#ifdef HAVE_DTRACE_H
  _trap_offset             = 0;
#endif // def HAVE_DTRACE_H
}

nmethod* nmethod::new_native_nmethod(methodHandle method,
  int compile_id,
  CodeBuffer *code_buffer,
  int vep_offset,
  int frame_complete,
  int frame_size,
  ByteSize basic_lock_owner_sp_offset,
  ByteSize basic_lock_sp_offset,
  OopMapSet* oop_maps) {
  code_buffer->finalize_oop_references(method);
  // create nmethod
  nmethod* nm = NULL;
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    int native_nmethod_size = allocation_size(code_buffer, sizeof(nmethod));
    CodeOffsets offsets;
    offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
    offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
    nm = new (native_nmethod_size) nmethod(method(), native_nmethod_size,
                                            compile_id, &offsets,
                                            code_buffer, frame_size,
                                            basic_lock_owner_sp_offset,
                                            basic_lock_sp_offset, oop_maps);
    NOT_PRODUCT(if (nm != NULL)  nmethod_stats.note_native_nmethod(nm));
    if (PrintAssembly && nm != NULL) {
      Disassembler::decode(nm);
    }
  }
  // verify nmethod
  debug_only(if (nm) nm->verify();) // might block

  if (nm != NULL) {
    nm->log_new_nmethod();
  }

  return nm;
}

#ifdef HAVE_DTRACE_H
nmethod* nmethod::new_dtrace_nmethod(methodHandle method,
                                     CodeBuffer *code_buffer,
                                     int vep_offset,
                                     int trap_offset,
                                     int frame_complete,
                                     int frame_size) {
  code_buffer->finalize_oop_references(method);
  // create nmethod
  nmethod* nm = NULL;
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    int nmethod_size = allocation_size(code_buffer, sizeof(nmethod));
    CodeOffsets offsets;
    offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
    offsets.set_value(CodeOffsets::Dtrace_trap, trap_offset);
    offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);

    nm = new (nmethod_size) nmethod(method(), nmethod_size,
                                    &offsets, code_buffer, frame_size);

    NOT_PRODUCT(if (nm != NULL)  nmethod_stats.note_nmethod(nm));
    if (PrintAssembly && nm != NULL) {
      Disassembler::decode(nm);
    }
  }
  // verify nmethod
  debug_only(if (nm) nm->verify();) // might block

  if (nm != NULL) {
    nm->log_new_nmethod();
  }

  return nm;
}

#endif // def HAVE_DTRACE_H

nmethod* nmethod::new_nmethod(methodHandle method,
  int compile_id,
  int entry_bci,
  CodeOffsets* offsets,
  int orig_pc_offset,
  DebugInformationRecorder* debug_info,
  Dependencies* dependencies,
  CodeBuffer* code_buffer, int frame_size,
  OopMapSet* oop_maps,
  ExceptionHandlerTable* handler_table,
  ImplicitExceptionTable* nul_chk_table,
  AbstractCompiler* compiler,
  int comp_level
)
{
  assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
  code_buffer->finalize_oop_references(method);
  // create nmethod
  nmethod* nm = NULL;
  { MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    int nmethod_size =
      allocation_size(code_buffer, sizeof(nmethod))
      + adjust_pcs_size(debug_info->pcs_size())
      + round_to(dependencies->size_in_bytes() , oopSize)
      + round_to(handler_table->size_in_bytes(), oopSize)
      + round_to(nul_chk_table->size_in_bytes(), oopSize)
      + round_to(debug_info->data_size()       , oopSize);

    nm = new (nmethod_size)
    nmethod(method(), nmethod_size, compile_id, entry_bci, offsets,
            orig_pc_offset, debug_info, dependencies, code_buffer, frame_size,
            oop_maps,
            handler_table,
            nul_chk_table,
            compiler,
            comp_level);

    if (nm != NULL) {
      // To make dependency checking during class loading fast, record
      // the nmethod dependencies in the classes it is dependent on.
      // This allows the dependency checking code to simply walk the
      // class hierarchy above the loaded class, checking only nmethods
      // which are dependent on those classes.  The slow way is to
      // check every nmethod for dependencies, which makes it linear in
      // the number of methods compiled.  For applications with a lot
      // of classes the slow way is too slow.
      for (Dependencies::DepStream deps(nm); deps.next(); ) {
        Klass* klass = deps.context_type();
        if (klass == NULL) {
          continue;  // ignore things like evol_method
        }

        // record this nmethod as dependent on this klass
        InstanceKlass::cast(klass)->add_dependent_nmethod(nm);
      }
      NOT_PRODUCT(nmethod_stats.note_nmethod(nm));
      if (PrintAssembly || CompilerOracle::has_option_string(method, "PrintAssembly")) {
        Disassembler::decode(nm);
      }
    }
  }
  // Do verification and logging outside CodeCache_lock.
  if (nm != NULL) {
    // Safepoints in nmethod::verify aren't allowed because nm hasn't been installed yet.
    DEBUG_ONLY(nm->verify();)
    nm->log_new_nmethod();
  }
  return nm;
}


// For native wrappers
nmethod::nmethod(
  Method* method,
  int nmethod_size,
  int compile_id,
  CodeOffsets* offsets,
  CodeBuffer* code_buffer,
  int frame_size,
  ByteSize basic_lock_owner_sp_offset,
  ByteSize basic_lock_sp_offset,
  OopMapSet* oop_maps )
  : CodeBlob("native nmethod", code_buffer, sizeof(nmethod),
             nmethod_size, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps),
  _native_receiver_sp_offset(basic_lock_owner_sp_offset),
  _native_basic_lock_sp_offset(basic_lock_sp_offset)
{
  {
    debug_only(No_Safepoint_Verifier nsv;)
    assert_locked_or_safepoint(CodeCache_lock);

    init_defaults();
    _method                  = method;
    _entry_bci               = InvocationEntryBci;
    // We have no exception handler or deopt handler; make the
    // values something that will never match a pc, like the nmethod vtable entry
    _exception_offset        = 0;
    _deoptimize_offset       = 0;
    _deoptimize_mh_offset    = 0;
    _orig_pc_offset          = 0;

    _consts_offset           = data_offset();
    _stub_offset             = data_offset();
    _oops_offset             = data_offset();
    _metadata_offset         = _oops_offset         + round_to(code_buffer->total_oop_size(), oopSize);
    _scopes_data_offset      = _metadata_offset     + round_to(code_buffer->total_metadata_size(), wordSize);
    _scopes_pcs_offset       = _scopes_data_offset;
    _dependencies_offset     = _scopes_pcs_offset;
    _handler_table_offset    = _dependencies_offset;
    _nul_chk_table_offset    = _handler_table_offset;
    _nmethod_end_offset      = _nul_chk_table_offset;
    _compile_id              = compile_id;
    _comp_level              = CompLevel_none;
    _entry_point             = code_begin()          + offsets->value(CodeOffsets::Entry);
    _verified_entry_point    = code_begin()          + offsets->value(CodeOffsets::Verified_Entry);
    _osr_entry_point         = NULL;
    _exception_cache         = NULL;
    _pc_desc_cache.reset_to(NULL);
    _hotness_counter         = NMethodSweeper::hotness_counter_reset_val();

    code_buffer->copy_values_to(this);
    if (ScavengeRootsInCode && detect_scavenge_root_oops()) {
      CodeCache::add_scavenge_root_nmethod(this);
      Universe::heap()->register_nmethod(this);
    }
    debug_only(verify_scavenge_root_oops());
    CodeCache::commit(this);
  }

  if (PrintNativeNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) {
    ttyLocker ttyl;  // keep the following output all in one block
    // This output goes directly to the tty, not the compiler log.
    // To enable tools to match it up with the compilation activity,
    // be sure to tag this tty output with the compile ID.
    if (xtty != NULL) {
      xtty->begin_head("print_native_nmethod");
      xtty->method(_method);
      xtty->stamp();
      xtty->end_head(" address='" INTPTR_FORMAT "'", (intptr_t) this);
    }
    // print the header part first
    print();
    // then print the requested information
    if (PrintNativeNMethods) {
      print_code();
      if (oop_maps != NULL) {
        oop_maps->print();
      }
    }
    if (PrintRelocations) {
      print_relocations();
    }
    if (xtty != NULL) {
      xtty->tail("print_native_nmethod");
    }
  }
}

// For dtrace wrappers
#ifdef HAVE_DTRACE_H
nmethod::nmethod(
  Method* method,
  int nmethod_size,
  CodeOffsets* offsets,
  CodeBuffer* code_buffer,
  int frame_size)
  : CodeBlob("dtrace nmethod", code_buffer, sizeof(nmethod),
             nmethod_size, offsets->value(CodeOffsets::Frame_Complete), frame_size, NULL),
  _native_receiver_sp_offset(in_ByteSize(-1)),
  _native_basic_lock_sp_offset(in_ByteSize(-1))
{
  {
    debug_only(No_Safepoint_Verifier nsv;)
    assert_locked_or_safepoint(CodeCache_lock);

    init_defaults();
    _method                  = method;
    _entry_bci               = InvocationEntryBci;
    // We have no exception handler or deopt handler; make the
    // values something that will never match a pc, like the nmethod vtable entry
    _exception_offset        = 0;
    _deoptimize_offset       = 0;
    _deoptimize_mh_offset    = 0;
    _unwind_handler_offset   = -1;
    _trap_offset             = offsets->value(CodeOffsets::Dtrace_trap);
    _orig_pc_offset          = 0;
    _consts_offset           = data_offset();
    _stub_offset             = data_offset();
    _oops_offset             = data_offset();
    _metadata_offset         = _oops_offset         + round_to(code_buffer->total_oop_size(), oopSize);
    _scopes_data_offset      = _metadata_offset     + round_to(code_buffer->total_metadata_size(), wordSize);
    _scopes_pcs_offset       = _scopes_data_offset;
    _dependencies_offset     = _scopes_pcs_offset;
    _handler_table_offset    = _dependencies_offset;
    _nul_chk_table_offset    = _handler_table_offset;
    _nmethod_end_offset      = _nul_chk_table_offset;
    _compile_id              = 0;  // default
    _comp_level              = CompLevel_none;
    _entry_point             = code_begin()          + offsets->value(CodeOffsets::Entry);
    _verified_entry_point    = code_begin()          + offsets->value(CodeOffsets::Verified_Entry);
    _osr_entry_point         = NULL;
    _exception_cache         = NULL;
    _pc_desc_cache.reset_to(NULL);
    _hotness_counter         = NMethodSweeper::hotness_counter_reset_val();

    code_buffer->copy_values_to(this);
    debug_only(verify_scavenge_root_oops());
    CodeCache::commit(this);
  }

  if (PrintNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) {
    ttyLocker ttyl;  // keep the following output all in one block
    // This output goes directly to the tty, not the compiler log.
    // To enable tools to match it up with the compilation activity,
    // be sure to tag this tty output with the compile ID.
    if (xtty != NULL) {
      xtty->begin_head("print_dtrace_nmethod");
      xtty->method(_method);
      xtty->stamp();
      xtty->end_head(" address='" INTPTR_FORMAT "'", (intptr_t) this);
    }
    // print the header part first
    print();
    // then print the requested information
    if (PrintNMethods) {
      print_code();
    }
    if (PrintRelocations) {
      print_relocations();
    }
    if (xtty != NULL) {
      xtty->tail("print_dtrace_nmethod");
    }
  }
}
#endif // def HAVE_DTRACE_H

void* nmethod::operator new(size_t size, int nmethod_size) throw() {
  // Not critical, may return null if there is too little contiguous memory
  return CodeCache::allocate(nmethod_size);
}

nmethod::nmethod(
  Method* method,
  int nmethod_size,
  int compile_id,
  int entry_bci,
  CodeOffsets* offsets,
  int orig_pc_offset,
  DebugInformationRecorder* debug_info,
  Dependencies* dependencies,
  CodeBuffer *code_buffer,
  int frame_size,
  OopMapSet* oop_maps,
  ExceptionHandlerTable* handler_table,
  ImplicitExceptionTable* nul_chk_table,
  AbstractCompiler* compiler,
  int comp_level
  )
  : CodeBlob("nmethod", code_buffer, sizeof(nmethod),
             nmethod_size, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps),
  _native_receiver_sp_offset(in_ByteSize(-1)),
  _native_basic_lock_sp_offset(in_ByteSize(-1))
{
  assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
  {
    debug_only(No_Safepoint_Verifier nsv;)
    assert_locked_or_safepoint(CodeCache_lock);

    init_defaults();
    _method                  = method;
    _entry_bci               = entry_bci;
    _compile_id              = compile_id;
    _comp_level              = comp_level;
    _compiler                = compiler;
    _orig_pc_offset          = orig_pc_offset;
    _hotness_counter         = NMethodSweeper::hotness_counter_reset_val();

    // Section offsets
    _consts_offset           = content_offset()      + code_buffer->total_offset_of(code_buffer->consts());
    _stub_offset             = content_offset()      + code_buffer->total_offset_of(code_buffer->stubs());

    // Exception handler and deopt handler are in the stub section
    assert(offsets->value(CodeOffsets::Exceptions) != -1, "must be set");
    assert(offsets->value(CodeOffsets::Deopt     ) != -1, "must be set");
    _exception_offset        = _stub_offset          + offsets->value(CodeOffsets::Exceptions);
    _deoptimize_offset       = _stub_offset          + offsets->value(CodeOffsets::Deopt);
    if (offsets->value(CodeOffsets::DeoptMH) != -1) {
      _deoptimize_mh_offset  = _stub_offset          + offsets->value(CodeOffsets::DeoptMH);
    } else {
      _deoptimize_mh_offset  = -1;
    }
    if (offsets->value(CodeOffsets::UnwindHandler) != -1) {
      _unwind_handler_offset = code_offset()         + offsets->value(CodeOffsets::UnwindHandler);
    } else {
      _unwind_handler_offset = -1;
    }

    _oops_offset             = data_offset();
    _metadata_offset         = _oops_offset          + round_to(code_buffer->total_oop_size(), oopSize);
    _scopes_data_offset      = _metadata_offset      + round_to(code_buffer->total_metadata_size(), wordSize);

    _scopes_pcs_offset       = _scopes_data_offset   + round_to(debug_info->data_size       (), oopSize);
    _dependencies_offset     = _scopes_pcs_offset    + adjust_pcs_size(debug_info->pcs_size());
    _handler_table_offset    = _dependencies_offset  + round_to(dependencies->size_in_bytes (), oopSize);
    _nul_chk_table_offset    = _handler_table_offset + round_to(handler_table->size_in_bytes(), oopSize);
    _nmethod_end_offset      = _nul_chk_table_offset + round_to(nul_chk_table->size_in_bytes(), oopSize);

    _entry_point             = code_begin()          + offsets->value(CodeOffsets::Entry);
    _verified_entry_point    = code_begin()          + offsets->value(CodeOffsets::Verified_Entry);
    _osr_entry_point         = code_begin()          + offsets->value(CodeOffsets::OSR_Entry);
    _exception_cache         = NULL;
    _pc_desc_cache.reset_to(scopes_pcs_begin());

    // Copy contents of ScopeDescRecorder to nmethod
    code_buffer->copy_values_to(this);
    debug_info->copy_to(this);
    dependencies->copy_to(this);
    if (ScavengeRootsInCode && detect_scavenge_root_oops()) {
      CodeCache::add_scavenge_root_nmethod(this);
      Universe::heap()->register_nmethod(this);
    }
    debug_only(verify_scavenge_root_oops());

    CodeCache::commit(this);

    // Copy contents of ExceptionHandlerTable to nmethod
    handler_table->copy_to(this);
    nul_chk_table->copy_to(this);

    // we use the information of entry points to find out if a method is
    // static or non-static
    assert(compiler->is_c2() ||
           _method->is_static() == (entry_point() == _verified_entry_point),
           " entry points must be same for static methods and vice versa");
  }

  bool printnmethods = PrintNMethods
    || CompilerOracle::should_print(_method)
    || CompilerOracle::has_option_string(_method, "PrintNMethods");
  if (printnmethods || PrintDebugInfo || PrintRelocations || PrintDependencies || PrintExceptionHandlers) {
    print_nmethod(printnmethods);
  }
}


// Print a short set of xml attributes to identify this nmethod.  The
// output should be embedded in some other element.
void nmethod::log_identity(xmlStream* log) const {
  log->print(" compile_id='%d'", compile_id());
  const char* nm_kind = compile_kind();
  if (nm_kind != NULL)  log->print(" compile_kind='%s'", nm_kind);
  if (compiler() != NULL) {
    log->print(" compiler='%s'", compiler()->name());
  }
  if (TieredCompilation) {
    log->print(" level='%d'", comp_level());
  }
}


#define LOG_OFFSET(log, name)                    \
  if ((intptr_t)name##_end() - (intptr_t)name##_begin()) \
    log->print(" " XSTR(name) "_offset='%d'"    , \
               (intptr_t)name##_begin() - (intptr_t)this)


void nmethod::log_new_nmethod() const {
  if (LogCompilation && xtty != NULL) {
    ttyLocker ttyl;
    HandleMark hm;
    xtty->begin_elem("nmethod");
    log_identity(xtty);
    xtty->print(" entry='" INTPTR_FORMAT "' size='%d'", code_begin(), size());
    xtty->print(" address='" INTPTR_FORMAT "'", (intptr_t) this);

    LOG_OFFSET(xtty, relocation);
    LOG_OFFSET(xtty, consts);
    LOG_OFFSET(xtty, insts);
    LOG_OFFSET(xtty, stub);
    LOG_OFFSET(xtty, scopes_data);
    LOG_OFFSET(xtty, scopes_pcs);
    LOG_OFFSET(xtty, dependencies);
    LOG_OFFSET(xtty, handler_table);
    LOG_OFFSET(xtty, nul_chk_table);
    LOG_OFFSET(xtty, oops);

    xtty->method(method());
    xtty->stamp();
    xtty->end_elem();
  }
}

#undef LOG_OFFSET


// Print out more verbose output, usually for a newly created nmethod.
void nmethod::print_on(outputStream* st, const char* msg) const {
  if (st != NULL) {
    ttyLocker ttyl;
    if (WizardMode) {
      CompileTask::print_compilation(st, this, msg, /*short_form:*/ true);
      st->print_cr(" (" INTPTR_FORMAT ")", this);
    } else {
      CompileTask::print_compilation(st, this, msg, /*short_form:*/ false);
    }
  }
}


void nmethod::print_nmethod(bool printmethod) {
  ttyLocker ttyl;  // keep the following output all in one block
  if (xtty != NULL) {
    xtty->begin_head("print_nmethod");
    xtty->stamp();
    xtty->end_head();
  }
  // print the header part first
  print();
  // then print the requested information
  if (printmethod) {
    print_code();
    print_pcs();
    if (oop_maps()) {
      oop_maps()->print();
    }
  }
  if (PrintDebugInfo) {
    print_scopes();
  }
  if (PrintRelocations) {
    print_relocations();
  }
  if (PrintDependencies) {
    print_dependencies();
  }
  if (PrintExceptionHandlers) {
    print_handler_table();
    print_nul_chk_table();
  }
  if (xtty != NULL) {
    xtty->tail("print_nmethod");
  }
}


// Promote one word from an assembly-time handle to a live embedded oop.
inline void nmethod::initialize_immediate_oop(oop* dest, jobject handle) {
  if (handle == NULL ||
      // As a special case, IC oops are initialized to 1 or -1.
      handle == (jobject) Universe::non_oop_word()) {
    (*dest) = (oop) handle;
  } else {
    (*dest) = JNIHandles::resolve_non_null(handle);
  }
}


// Have to have the same name because it's called by a template
void nmethod::copy_values(GrowableArray<jobject>* array) {
  int length = array->length();
  assert((address)(oops_begin() + length) <= (address)oops_end(), "oops big enough");
  oop* dest = oops_begin();
  for (int index = 0 ; index < length; index++) {
    initialize_immediate_oop(&dest[index], array->at(index));
  }

  // Now we can fix up all the oops in the code.  We need to do this
  // in the code because the assembler uses jobjects as placeholders.
  // The code and relocations have already been initialized by the
  // CodeBlob constructor, so it is valid even at this early point to
  // iterate over relocations and patch the code.
  fix_oop_relocations(NULL, NULL, /*initialize_immediates=*/ true);
}

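// Metadata* values are copied verbatim into the metadata section; unlike
// the oop variant above, no immediate-relocation pass is needed here.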
void nmethod::copy_values(GrowableArray<Metadata*>* array) {
  int length = array->length();
  assert((address)(metadata_begin() + length) <= (address)metadata_end(), "big enough");
  Metadata** dest = metadata_begin();
  for (int index = 0 ; index < length; index++) {
    dest[index] = array->at(index);
  }
}

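// Scan the relocations covering [pc, pc+1) to decide whether pc is a
// safepoint poll return site (and, below, whether it is any kind of poll).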
bool nmethod::is_at_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    if (iter.type() == relocInfo::poll_return_type)
      return true;
  }
  return false;
}


bool nmethod::is_at_poll_or_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    relocInfo::relocType t = iter.type();
    if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
      return true;
  }
  return false;
}


void nmethod::fix_oop_relocations(address begin, address end, bool initialize_immediates) {
  // re-patch all oop-bearing instructions, just in case some oops moved
  RelocIterator iter(this, begin, end);
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* reloc = iter.oop_reloc();
      if (initialize_immediates && reloc->oop_is_immediate()) {
        oop* dest = reloc->oop_addr();
        initialize_immediate_oop(dest, (jobject) *dest);
      }
      // Refresh the oop-related bits of this instruction.
      reloc->fix_oop_relocation();
    } else if (iter.type() == relocInfo::metadata_type) {
      metadata_Relocation* reloc = iter.metadata_reloc();
      reloc->fix_metadata_relocation();
    }
  }
}


void nmethod::verify_oop_relocations() {
  // Ensure that the code matches the current oop values
  RelocIterator iter(this, NULL, NULL);
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* reloc = iter.oop_reloc();
      if (!reloc->oop_is_immediate()) {
        reloc->verify_oop_relocation();
      }
    }
  }
}


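// Build a ScopeDesc for the debug info at pc; a PcDesc (and hence a scope)
// must have been recorded for that pc.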
ScopeDesc* nmethod::scope_desc_at(address pc) {
  PcDesc* pd = pc_desc_at(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd->scope_decode_offset(),
                       pd->obj_decode_offset(), pd->should_reexecute(),
                       pd->return_oop());
}


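// Reset every inline cache in this nmethod; only legal at a safepoint,
// and a no-op for zombies.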
void nmethod::clear_inline_caches() {
  assert(SafepointSynchronize::is_at_safepoint(), "cleaning of IC's only allowed at safepoint");
  if (is_zombie()) {
    return;
  }

  RelocIterator iter(this);
  while (iter.next()) {
    iter.reloc()->clear_inline_cache();
  }
}


void nmethod::cleanup_inline_caches() {

  assert_locked_or_safepoint(CompiledIC_lock);

  // If the method is not entrant or zombie then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (!is_in_use()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // This means that the low_boundary is going to be a little too high.
    // This shouldn't matter, since oops of non-entrant methods are never used.
    // In fact, why are we bothering to look at oops in a non-entrant method??
  }

  // Find all calls in an nmethod, and clear the ones that point to zombie methods
  ResourceMark rm;
  RelocIterator iter(this, low_boundary);
  while(iter.next()) {
    switch(iter.type()) {
      case relocInfo::virtual_call_type:
      case relocInfo::opt_virtual_call_type: {
        CompiledIC *ic = CompiledIC_at(iter.reloc());
        // OK to look up references to zombies here
        CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
        if( cb != NULL && cb->is_nmethod() ) {
          nmethod* nm = (nmethod*)cb;
          // Clean inline caches pointing to both zombie and not_entrant methods
          if (!nm->is_in_use() || (nm->method()->code() != nm)) ic->set_to_clean();
        }
        break;
      }
      case relocInfo::static_call_type: {
        CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
        CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());
        if( cb != NULL && cb->is_nmethod() ) {
          nmethod* nm = (nmethod*)cb;
          // Clean inline caches pointing to both zombie and not_entrant methods
          if (!nm->is_in_use() || (nm->method()->code() != nm)) csc->set_to_clean();
        }
        break;
      }
    }
  }
}

// This is a private interface with the sweeper.
void nmethod::mark_as_seen_on_stack() {
  assert(is_alive(), "Must be an alive method");
  // Set the traversal mark to ensure that the sweeper does 2
  // cleaning passes before moving to zombie.
  set_stack_traversal_mark(NMethodSweeper::traversal_count());
}

// Tell if a non-entrant method can be converted to a zombie (i.e.,
// there are no activations on the stack, not in use by the VM,
// and not in use by the ServiceThread)
bool nmethod::can_not_entrant_be_converted() {
  assert(is_not_entrant(), "must be a non-entrant method");

  // Since the nmethod sweeper only does a partial sweep, the sweeper's traversal
  // count can be greater than the stack traversal count before it hits the
  // nmethod for the second time.
  return stack_traversal_mark()+1 < NMethodSweeper::traversal_count() &&
         !is_locked_by_vm();
}

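// Record a decompile in this method's MethodData (only tracked for
// C2-compiled code).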
void nmethod::inc_decompile_count() {
  if (!is_compiled_by_c2()) return;
  // Could be gated by ProfileTraps, but do not bother...
  Method* m = method();
  if (m == NULL)  return;
  MethodData* mdo = m->method_data();
  if (mdo == NULL)  return;
  // There is a benign race here.  See comments in methodData.hpp.
  mdo->inc_decompile_count();
}

void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {

  post_compiled_method_unload();

  // Since this nmethod is being unloaded, make sure that dependencies
  // recorded in instanceKlasses get flushed and pass a non-NULL closure to
  // indicate that this work is being done during a GC.
  assert(Universe::heap()->is_gc_active(), "should only be called during gc");
  assert(is_alive != NULL, "Should be non-NULL");
  // A non-NULL is_alive closure indicates that this is being called during GC.
  flush_dependencies(is_alive);

  // Break cycle between nmethod & method
  if (TraceClassUnloading && WizardMode) {
    tty->print_cr("[Class unloading: Making nmethod " INTPTR_FORMAT
                  " unloadable], Method*(" INTPTR_FORMAT
                  "), cause(" INTPTR_FORMAT ")",
                  this, (address)_method, (address)cause);
    if (!Universe::heap()->is_gc_active())
      cause->klass()->print();
  }
  // Unlink the osr method, so we do not look this up again
  if (is_osr_method()) {
    invalidate_osr_method();
  }
  // If _method is already NULL the Method* is about to be unloaded,
  // so we don't have to break the cycle. Note that it is possible to
  // have the Method* live here, in case we unload the nmethod because
  // it is pointing to some oop (other than the Method*) being unloaded.
  if (_method != NULL) {
    // OSR methods point to the Method*, but the Method* does not
    // point back!
    if (_method->code() == this) {
      _method->clear_code(); // Break a cycle
    }
    _method = NULL;            // Clear the method of this dead nmethod
  }
  // Make the nmethod unloaded - i.e., change state and notify sweeper
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  if (is_in_use()) {
    // Transitioning directly from live to unloaded -- so
    // we need to force a cache clean-up; remember this
    // for later on.
    CodeCache::set_needs_cache_clean(true);
  }
  _state = unloaded;

  // Log the unloading.
  log_state_change();

  // The Method* is gone at this point
  assert(_method == NULL, "Tautology");

  set_osr_link(NULL);
  //set_scavenge_root_link(NULL); // done by prune_scavenge_root_nmethods
  NMethodSweeper::report_state_change(this);
}

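// Remove this OSR nmethod from its holder's OSR list and poison its
// entry BCI so it can no longer be looked up.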
void nmethod::invalidate_osr_method() {
  assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod");
  // Remove from list of active nmethods
  if (method() != NULL)
    method()->method_holder()->remove_osr_nmethod(this);
  // Set entry as invalid
  _entry_bci = InvalidOSREntryBci;
}

void nmethod::log_state_change() const {
  if (LogCompilation) {
    if (xtty != NULL) {
      ttyLocker ttyl;  // keep the following output all in one block
      if (_state == unloaded) {
        xtty->begin_elem("make_unloaded thread='" UINTX_FORMAT "'",
                         os::current_thread_id());
      } else {
        xtty->begin_elem("make_not_entrant thread='" UINTX_FORMAT "'%s",
                         os::current_thread_id(),
                         (_state == zombie ? " zombie='1'" : ""));
      }
      log_identity(xtty);
      xtty->stamp();
      xtty->end_elem();
    }
  }
  if (PrintCompilation && _state != unloaded) {
    print_on(tty, _state == zombie ? "made zombie" : "made not entrant");
  }
}

/**
 * Common functionality for both make_not_entrant and make_zombie
 */
bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
  assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");
  assert(!is_zombie(), "should not already be a zombie");

  // Make sure neither the nmethod nor the method is flushed in case of a safepoint in code below.
  nmethodLocker nml(this);
  methodHandle the_method(method());
  No_Safepoint_Verifier nsv;

  // During patching, depending on the nmethod state, we must notify the GC that
  // code has been unloaded, unregistering it. We cannot do that while
  // holding the Patching_lock because we need the CodeCache_lock, which
  // would be prone to deadlocks.
  // This flag is used to remember whether we need to later lock and unregister.
  bool nmethod_needs_unregister = false;

  {
    // invalidate osr nmethod before acquiring the patching lock since
    // they both acquire leaf locks and we don't want a deadlock.
    // This logic is equivalent to the logic below for patching the
    // verified entry point of regular methods.
    if (is_osr_method()) {
      // this effectively makes the osr nmethod not entrant
      invalidate_osr_method();
    }

    // Enter critical section.  Does not block for safepoint.
    MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);

    if (_state == state) {
      // another thread already performed this transition so nothing
      // to do, but return false to indicate this.
      return false;
    }

    // The caller can be calling the method statically or through an inline
    // cache call.
    if (!is_osr_method() && !is_not_entrant()) {
      NativeJump::patch_verified_entry(entry_point(), verified_entry_point(),
                  SharedRuntime::get_handle_wrong_method_stub());
    }

    if (is_in_use()) {
      // It's a true state change, so mark the method as decompiled.
      // Do it only for transition from alive.
      inc_decompile_count();
    }

    // If the state is becoming a zombie, signal to unregister the nmethod with
    // the heap.
    // This nmethod may have already been unloaded during a full GC.
    if ((state == zombie) && !is_unloaded()) {
      nmethod_needs_unregister = true;
    }

    // Must happen before state change. Otherwise we have a race condition in
    // nmethod::can_not_entrant_be_converted(). I.e., a method can immediately
    // transition its state from 'not_entrant' to 'zombie' without having to wait
    // for stack scanning.
    if (state == not_entrant) {
      mark_as_seen_on_stack();
      OrderAccess::storestore();
    }

    // Change state
    _state = state;

    // Log the transition once
    log_state_change();

    // Remove nmethod from method.
    // We need to check if both the _code and _from_compiled_code_entry_point
    // refer to this nmethod because there is a race in setting these two fields
    // in Method* as seen in bugid 4947125.
    // If the vep() points to the zombie nmethod, the memory for the nmethod
    // could be flushed and the compiler and vtable stubs could still call
    // through it.
    if (method() != NULL && (method()->code() == this ||
                             method()->from_compiled_entry() == verified_entry_point())) {
      HandleMark hm;
      method()->clear_code();
    }
  } // leave critical region under Patching_lock

  // When the nmethod becomes zombie it is no longer alive so the
  // dependencies must be flushed.  nmethods in the not_entrant
  // state will be flushed later when the transition to zombie
  // happens or they get unloaded.
  if (state == zombie) {
    {
      // Flushing dependencies must be done before any possible
      // safepoint can sneak in, otherwise the oops used by the
      // dependency logic could have become stale.
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      if (nmethod_needs_unregister) {
        Universe::heap()->unregister_nmethod(this);
      }
      flush_dependencies(NULL);
    }

    // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload
    // event and it hasn't already been reported for this nmethod then
    // report it now. (The event may have been reported earlier if the GC
    // marked it for unloading.) JvmtiDeferredEventQueue support means
    // we no longer go to a safepoint here.
    post_compiled_method_unload();

#ifdef ASSERT
    // It's no longer safe to access the oops section since zombie
    // nmethods aren't scanned for GC.
    _oops_are_stale = true;
#endif
    // the Method may be reclaimed by class unloading now that the
    // nmethod is in zombie state
    set_method(NULL);
  } else {
    assert(state == not_entrant, "other cases may need to be handled differently");
  }

  if (TraceCreateZombies) {
    tty->print_cr("nmethod <" INTPTR_FORMAT "> code made %s", this, (state == not_entrant) ? "not entrant" : "zombie");
  }

  NMethodSweeper::report_state_change(this);
  return true;
}

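// Reclaim this nmethod's storage: tear down the exception cache, drop any
// scavenge-root bookkeeping, and return the memory to the code cache.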
void nmethod::flush() {
  // Note that there are no valid oops in the nmethod anymore.
  assert(is_zombie() || (is_osr_method() && is_unloaded()), "must be a zombie method");
  assert(is_marked_for_reclamation() || (is_osr_method() && is_unloaded()), "must be marked for reclamation");

  assert (!is_locked_by_vm(), "locked methods shouldn't be flushed");
  assert_locked_or_safepoint(CodeCache_lock);

  // completely deallocate this method
  Events::log(JavaThread::current(), "flushing nmethod " INTPTR_FORMAT, this);
  if (PrintMethodFlushing) {
    tty->print_cr("*flushing nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT "/Free CodeCache:" SIZE_FORMAT "Kb",
        _compile_id, this, CodeCache::nof_blobs(), CodeCache::unallocated_capacity()/1024);
  }

  // We need to deallocate any ExceptionCache data.
  // Note that we do not need to grab the nmethod lock for this, it
  // better be thread safe if we're disposing of it!
  ExceptionCache* ec = exception_cache();
  set_exception_cache(NULL);
  while(ec != NULL) {
    ExceptionCache* next = ec->next();
    delete ec;
    ec = next;
  }

  if (on_scavenge_root_list()) {
    CodeCache::drop_scavenge_root_nmethod(this);
  }

#ifdef SHARK
  ((SharkCompiler *) compiler())->free_compiled_method(insts_begin());
#endif // SHARK

  ((CodeBlob*)(this))->flush();

  CodeCache::free(this);
}


//
// Notify all classes this nmethod is dependent on that it is no
// longer dependent. This should only be called in two situations.
// First, when a nmethod transitions to a zombie all dependents need
// to be cleared.  Since zombification happens at a safepoint there are no
// synchronization issues.  The second place is a little more tricky.
// During phase 1 of mark sweep class unloading may happen and as a
// result some nmethods may get unloaded.  In this case the flushing
// of dependencies must happen during phase 1 since after GC any
// dependencies in the unloaded nmethod won't be updated, so
// traversing the dependency information is unsafe.  In that case this
// function is called with a non-NULL argument and only notifies
// instanceKlasses that are reachable.
1457
1458void nmethod::flush_dependencies(BoolObjectClosure* is_alive) {
1459  assert_locked_or_safepoint(CodeCache_lock);
1460  assert(Universe::heap()->is_gc_active() == (is_alive != NULL),
1461         "is_alive is non-NULL if and only if we are called during GC");
1462  if (!has_flushed_dependencies()) {
1463    set_has_flushed_dependencies();
1464    for (Dependencies::DepStream deps(this); deps.next(); ) {
1465      Klass* klass = deps.context_type();
1466      if (klass == NULL)  continue;  // ignore things like evol_method
1467
1468      // During GC the is_alive closure is non-NULL, and is used to
1469      // determine liveness of dependees that need to be updated.
1470      if (is_alive == NULL || klass->is_loader_alive(is_alive)) {
1471        InstanceKlass::cast(klass)->remove_dependent_nmethod(this);
1472      }
1473    }
1474  }
1475}
1476
1477
1478// If this oop is not live, the nmethod can be unloaded.
1479bool nmethod::can_unload(BoolObjectClosure* is_alive, oop* root, bool unloading_occurred) {
1480  assert(root != NULL, "just checking");
1481  oop obj = *root;
1482  if (obj == NULL || is_alive->do_object_b(obj)) {
1483      return false;
1484  }
1485
1486  // If ScavengeRootsInCode is true, an nmethod might be unloaded
1487  // simply because one of its constant oops has gone dead.
1488  // No actual classes need to be unloaded in order for this to occur.
1489  assert(unloading_occurred || ScavengeRootsInCode, "Inconsistency in unloading");
1490  make_unloaded(is_alive, obj);
1491  return true;
1492}
1493
1494// ------------------------------------------------------------------
1495// post_compiled_method_load_event
1496// Called on the install_code() path.
1497// Transfer information from the compilation to JVMTI.
1498void nmethod::post_compiled_method_load_event() {
1499
1500  Method* moop = method();
1501  HOTSPOT_COMPILED_METHOD_LOAD(
1502      (char *) moop->klass_name()->bytes(),
1503      moop->klass_name()->utf8_length(),
1504      (char *) moop->name()->bytes(),
1505      moop->name()->utf8_length(),
1506      (char *) moop->signature()->bytes(),
1507      moop->signature()->utf8_length(),
1508      insts_begin(), insts_size());
1509
1510  if (JvmtiExport::should_post_compiled_method_load() ||
1511      JvmtiExport::should_post_compiled_method_unload()) {
1512    get_and_cache_jmethod_id();
1513  }
1514
1515  if (JvmtiExport::should_post_compiled_method_load()) {
1516    // Let the Service thread (which is a real Java thread) post the event
1517    MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
1518    JvmtiDeferredEventQueue::enqueue(
1519      JvmtiDeferredEvent::compiled_method_load_event(this));
1520  }
1521}
1522
1523jmethodID nmethod::get_and_cache_jmethod_id() {
1524  if (_jmethod_id == NULL) {
1525    // Cache the jmethod_id since it can no longer be looked up once the
1526    // method itself has been marked for unloading.
1527    _jmethod_id = method()->jmethod_id();
1528  }
1529  return _jmethod_id;
1530}
1531
1532void nmethod::post_compiled_method_unload() {
1533  if (unload_reported()) {
1534    // During unloading we transition to unloaded and then to zombie
1535    // and the unloading is reported during the first transition.
1536    return;
1537  }
1538
1539  assert(_method != NULL && !is_unloaded(), "just checking");
1540  DTRACE_METHOD_UNLOAD_PROBE(method());
1541
1542  // If a JVMTI agent has enabled the CompiledMethodUnload event then
1543  // post the event. Sometime later this nmethod will be made a zombie
1544  // by the sweeper but the Method* will not be valid at that point.
1545  // If the _jmethod_id is null then no load event was ever requested
1546  // so don't bother posting the unload.  The main reason for this is
1547  // that the jmethodID is a weak reference to the Method* so if
1548  // it's being unloaded there's no way to look it up since the weak
1549  // ref will have been cleared.
1550  if (_jmethod_id != NULL && JvmtiExport::should_post_compiled_method_unload()) {
1551    assert(!unload_reported(), "already unloaded");
1552    JvmtiDeferredEvent event =
1553      JvmtiDeferredEvent::compiled_method_unload_event(this,
1554          _jmethod_id, insts_begin());
1555    if (SafepointSynchronize::is_at_safepoint()) {
1556      // Don't want to take the queueing lock. Add it as pending and
1557      // it will get enqueued later.
1558      JvmtiDeferredEventQueue::add_pending_event(event);
1559    } else {
1560      MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
1561      JvmtiDeferredEventQueue::enqueue(event);
1562    }
1563  }
1564
1565  // The JVMTI CompiledMethodUnload event can be enabled or disabled at
1566  // any time. As the nmethod is being unloaded now we mark it as
1567  // having the unload event reported - this will ensure that we don't
1568  // attempt to report the event in the unlikely scenario where the
1569  // event is enabled at the time the nmethod is made a zombie.
1570  set_unload_reported();
1571}
1572
1573// This is called at the end of the strong tracing/marking phase of a
1574// GC to unload an nmethod if it contains otherwise unreachable
1575// oops.
1576
1577void nmethod::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
1578  // Make sure the oops are ready to receive visitors.
1579  assert(!is_zombie() && !is_unloaded(),
1580         "should not call follow on zombie or unloaded nmethod");
1581
1582  // If the method is not entrant then a JMP is plastered over the
1583  // first few bytes.  If an oop in the old code was there, that oop
1584  // should not get GC'd.  Skip the first few bytes of oops on
1585  // not-entrant methods.
1586  address low_boundary = verified_entry_point();
1587  if (is_not_entrant()) {
1588    low_boundary += NativeJump::instruction_size;
1589    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
1590    // (See comment above.)
1591  }
1592
1593  // The RedefineClasses() API can cause the class unloading invariant
1594  // to no longer be true. See jvmtiExport.hpp for details.
1595  // Also, leave a debugging breadcrumb in local flag.
1596  bool a_class_was_redefined = JvmtiExport::has_redefined_a_class();
1597  if (a_class_was_redefined) {
1598    // This set of the unloading_occurred flag is done before the
1599    // call to post_compiled_method_unload() so that the unloading
1600    // of this nmethod is reported.
1601    unloading_occurred = true;
1602  }
1603
1604  // Exception cache
1605  ExceptionCache* ec = exception_cache();
1606  while (ec != NULL) {
1607    Klass* ex_klass = ec->exception_type();
1608    ExceptionCache* next_ec = ec->next();
1609    if (ex_klass != NULL && !ex_klass->is_loader_alive(is_alive)) {
1610      remove_from_exception_cache(ec);
1611    }
1612    ec = next_ec;
1613  }
1614
1615  // If class unloading occurred we first iterate over all inline caches and
1616  // clear ICs where the cached oop is referring to an unloaded klass or method.
1617  // The remaining live cached oops will be traversed in the relocInfo::oop_type
1618  // iteration below.
1619  if (unloading_occurred) {
1620    RelocIterator iter(this, low_boundary);
1621    while (iter.next()) {
1622      if (iter.type() == relocInfo::virtual_call_type) {
1623        CompiledIC* ic = CompiledIC_at(iter.reloc());
1624        if (ic->is_icholder_call()) {
1625          // The only exception is CompiledICHolder metadata, which may
1626          // still be live; check its holder method and klass here.
1627          CompiledICHolder* cichk_oop = ic->cached_icholder();
1628          if (cichk_oop->holder_method()->method_holder()->is_loader_alive(is_alive) &&
1629              cichk_oop->holder_klass()->is_loader_alive(is_alive)) {
1630            continue;
1631          }
1632        } else {
1633          Metadata* ic_oop = ic->cached_metadata();
1634          if (ic_oop != NULL) {
1635            if (ic_oop->is_klass()) {
1636              if (((Klass*)ic_oop)->is_loader_alive(is_alive)) {
1637                continue;
1638              }
1639            } else if (ic_oop->is_method()) {
1640              if (((Method*)ic_oop)->method_holder()->is_loader_alive(is_alive)) {
1641                continue;
1642              }
1643            } else {
1644              ShouldNotReachHere();
1645            }
1646          }
1647        }
1648        ic->set_to_clean();
1649      }
1650    }
1651  }
1652
1653  // Compiled code
1654  {
1655    RelocIterator iter(this, low_boundary);
1656    while (iter.next()) {
1657      if (iter.type() == relocInfo::oop_type) {
1658        oop_Relocation* r = iter.oop_reloc();
1659        // In this loop, we must only traverse those oops directly embedded in
1660        // the code.  Other oops (oop_index>0) are seen as part of scopes_oops.
1661        assert(1 == (r->oop_is_immediate()) +
1662                    (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
1663               "oop must be found in exactly one place");
1664        if (r->oop_is_immediate() && r->oop_value() != NULL) {
1665          if (can_unload(is_alive, r->oop_addr(), unloading_occurred)) {
1666            return;
1667          }
1668        }
1669      }
1670    }
1671  }
1672
1673
1674  // Scopes
1675  for (oop* p = oops_begin(); p < oops_end(); p++) {
1676    if (*p == Universe::non_oop_word())  continue;  // skip non-oops
1677    if (can_unload(is_alive, p, unloading_occurred)) {
1678      return;
1679    }
1680  }
1681
1682  // Ensure that all metadata is still alive
1683  verify_metadata_loaders(low_boundary, is_alive);
1684}
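
// Summary of the passes above (informal): (1) drop exception cache entries
// whose exception Klass is no longer loader-alive; (2) if class unloading
// occurred, clean inline caches that refer to dead metadata; (3) unload
// this nmethod entirely if any immediate or scope oop has died; finally,
// assert that all remaining metadata is alive.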
1685
1686#ifdef ASSERT
1687
1688class CheckClass : AllStatic {
1689  static BoolObjectClosure* _is_alive;
1690
1691  // Check that the class loader is alive for this bit of metadata.
1692  static void check_class(Metadata* md) {
1693    Klass* klass = NULL;
1694    if (md->is_klass()) {
1695      klass = ((Klass*)md);
1696    } else if (md->is_method()) {
1697      klass = ((Method*)md)->method_holder();
1698    } else if (md->is_methodData()) {
1699      klass = ((MethodData*)md)->method()->method_holder();
1700    } else {
1701      md->print();
1702      ShouldNotReachHere();
1703    }
1704    assert(klass->is_loader_alive(_is_alive), "must be alive");
1705  }
1706 public:
1707  static void do_check_class(BoolObjectClosure* is_alive, nmethod* nm) {
1708    assert(SafepointSynchronize::is_at_safepoint(), "this is only ok at safepoint");
1709    _is_alive = is_alive;
1710    nm->metadata_do(check_class);
1711  }
1712};
1713
1714// This is called during a safepoint so can use static data
1715BoolObjectClosure* CheckClass::_is_alive = NULL;
1716#endif // ASSERT
1717
1718
1719// Processing of oop references should have been sufficient to keep
1720// all strong references alive.  Any weak references should have been
1721// cleared as well.  Visit all the metadata and ensure that it's
1722// really alive.
1723void nmethod::verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive) {
1724#ifdef ASSERT
1725  RelocIterator iter(this, low_boundary);
1726  while (iter.next()) {
1727    // static_stub_Relocations may have dangling references to
1728    // Method*s so trim them out here.  Otherwise it looks like
1729    // compiled code is maintaining a link to dead metadata.
1730    address static_call_addr = NULL;
1731    if (iter.type() == relocInfo::opt_virtual_call_type) {
1732      CompiledIC* cic = CompiledIC_at(iter.reloc());
1733      if (!cic->is_call_to_interpreted()) {
1734        static_call_addr = iter.addr();
1735      }
1736    } else if (iter.type() == relocInfo::static_call_type) {
1737      CompiledStaticCall* csc = compiledStaticCall_at(iter.reloc());
1738      if (!csc->is_call_to_interpreted()) {
1739        static_call_addr = iter.addr();
1740      }
1741    }
1742    if (static_call_addr != NULL) {
1743      RelocIterator sciter(this, low_boundary);
1744      while (sciter.next()) {
1745        if (sciter.type() == relocInfo::static_stub_type &&
1746            sciter.static_stub_reloc()->static_call() == static_call_addr) {
1747          sciter.static_stub_reloc()->clear_inline_cache();
1748        }
1749      }
1750    }
1751  }
1752  // Check that the metadata embedded in the nmethod is alive
1753  CheckClass::do_check_class(is_alive, this);
1754#endif
1755}
1756
1757
1758// Iterate over metadata calling this function.   Used by RedefineClasses
1759void nmethod::metadata_do(void f(Metadata*)) {
1760  address low_boundary = verified_entry_point();
1761  if (is_not_entrant()) {
1762    low_boundary += NativeJump::instruction_size;
1763    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
1764    // (See comment above.)
1765  }
1766  {
1767    // Visit all immediate references that are embedded in the instruction stream.
1768    RelocIterator iter(this, low_boundary);
1769    while (iter.next()) {
1770      if (iter.type() == relocInfo::metadata_type) {
1771        metadata_Relocation* r = iter.metadata_reloc();
1772        // In this loop, we must only follow the metadata directly embedded in
1773        // the code.  Other metadata (with index > 0) are seen as part of
1774        // the metadata section below.
1775        assert(1 == (r->metadata_is_immediate()) +
1776               (r->metadata_addr() >= metadata_begin() && r->metadata_addr() < metadata_end()),
1777               "metadata must be found in exactly one place");
1778        if (r->metadata_is_immediate() && r->metadata_value() != NULL) {
1779          Metadata* md = r->metadata_value();
1780          f(md);
1781        }
1782      } else if (iter.type() == relocInfo::virtual_call_type) {
1783        // Check compiledIC holders associated with this nmethod
1784        CompiledIC *ic = CompiledIC_at(iter.reloc());
1785        if (ic->is_icholder_call()) {
1786          CompiledICHolder* cichk = ic->cached_icholder();
1787          f(cichk->holder_method());
1788          f(cichk->holder_klass());
1789        } else {
1790          Metadata* ic_oop = ic->cached_metadata();
1791          if (ic_oop != NULL) {
1792            f(ic_oop);
1793          }
1794        }
1795      }
1796    }
1797  }
1798
1799  // Visit the metadata section
1800  for (Metadata** p = metadata_begin(); p < metadata_end(); p++) {
1801    if (*p == Universe::non_oop_word() || *p == NULL)  continue;  // skip non-metadata words
1802    Metadata* md = *p;
1803    f(md);
1804  }
1805
1806  // Finally, visit the Method* itself, which is not embedded in any of the places above.
1807  if (_method != NULL) f(_method);
1808}
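
// Usage sketch (illustrative only; the function and counter below are
// hypothetical): callers pass a plain function pointer, e.g.
//
//   static int _metadata_count = 0;
//   static void count_metadata(Metadata* md) { _metadata_count++; }
//   ...
//   nm->metadata_do(count_metadata);   // visits every Metadata* the nmethod holds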
1809
1810void nmethod::oops_do(OopClosure* f, bool allow_zombie) {
1811  // Make sure the oops are ready to receive visitors.
1812  assert(allow_zombie || !is_zombie(), "should not call follow on zombie nmethod");
1813  assert(!is_unloaded(), "should not call follow on unloaded nmethod");
1814
1815  // If the method is not entrant or zombie then a JMP is plastered over the
1816  // first few bytes.  If an oop in the old code was there, that oop
1817  // should not get GC'd.  Skip the first few bytes of oops on
1818  // not-entrant methods.
1819  address low_boundary = verified_entry_point();
1820  if (is_not_entrant()) {
1821    low_boundary += NativeJump::instruction_size;
1822    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
1823    // (See comment above.)
1824  }
1825
1826  RelocIterator iter(this, low_boundary);
1827
1828  while (iter.next()) {
1829    if (iter.type() == relocInfo::oop_type) {
1830      oop_Relocation* r = iter.oop_reloc();
1831      // In this loop, we must only follow those oops directly embedded in
1832      // the code.  Other oops (oop_index>0) are seen as part of scopes_oops.
1833      assert(1 == (r->oop_is_immediate()) +
1834                   (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
1835             "oop must be found in exactly one place");
1836      if (r->oop_is_immediate() && r->oop_value() != NULL) {
1837        f->do_oop(r->oop_addr());
1838      }
1839    }
1840  }
1841
1842  // Scopes
1843  // This includes oop constants not inlined in the code stream.
1844  for (oop* p = oops_begin(); p < oops_end(); p++) {
1845    if (*p == Universe::non_oop_word())  continue;  // skip non-oops
1846    f->do_oop(p);
1847  }
1848}
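
// Example closure (a minimal sketch in the style of VerifyOopsClosure
// below; the class name is hypothetical):
//
//   class CountOopsClosure: public OopClosure {
//    public:
//     int _count;
//     CountOopsClosure() : _count(0) { }
//     virtual void do_oop(oop* p)       { if (*p != NULL) _count++; }
//     virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
//   };
//
//   CountOopsClosure coc;
//   nm->oops_do(&coc);   // visits immediate oops and the oops section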
1849
1850#define NMETHOD_SENTINEL ((nmethod*)badAddress)
1851
1852nmethod* volatile nmethod::_oops_do_mark_nmethods;
1853
1854// An nmethod is "marked" if its _mark_link is set non-null.
1855// Even if it is the end of the linked list, it will have a non-null link value,
1856// as long as it is on the list.
1857// This code must be MP safe, because it is used from parallel GC passes.
1858bool nmethod::test_set_oops_do_mark() {
1859  assert(nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");
1860  nmethod* observed_mark_link = _oops_do_mark_link;
1861  if (observed_mark_link == NULL) {
1862    // Claim this nmethod for this thread to mark.
1863    observed_mark_link = (nmethod*)
1864      Atomic::cmpxchg_ptr(NMETHOD_SENTINEL, &_oops_do_mark_link, NULL);
1865    if (observed_mark_link == NULL) {
1866
1867      // Atomically append this nmethod (now claimed) to the head of the list:
1868      nmethod* observed_mark_nmethods = _oops_do_mark_nmethods;
1869      for (;;) {
1870        nmethod* required_mark_nmethods = observed_mark_nmethods;
1871        _oops_do_mark_link = required_mark_nmethods;
1872        observed_mark_nmethods = (nmethod*)
1873          Atomic::cmpxchg_ptr(this, &_oops_do_mark_nmethods, required_mark_nmethods);
1874        if (observed_mark_nmethods == required_mark_nmethods)
1875          break;
1876      }
1877      // Mark was clear when we first saw this nmethod.
1878      NOT_PRODUCT(if (TraceScavenge)  print_on(tty, "oops_do, mark"));
1879      return false;
1880    }
1881  }
1882  // On fall through, another racing thread marked this nmethod before we did.
1883  return true;
1884}
1885
1886void nmethod::oops_do_marking_prologue() {
1887  NOT_PRODUCT(if (TraceScavenge)  tty->print_cr("[oops_do_marking_prologue"));
1888  assert(_oops_do_mark_nmethods == NULL, "must not call oops_do_marking_prologue twice in a row");
1889  // We use cmpxchg_ptr instead of regular assignment here because the caller
1890  // may fork a bunch of GC worker threads, and we need them all to see the same state.
1891  void* observed = Atomic::cmpxchg_ptr(NMETHOD_SENTINEL, &_oops_do_mark_nmethods, NULL);
1892  guarantee(observed == NULL, "no races in this sequential code");
1893}
1894
1895void nmethod::oops_do_marking_epilogue() {
1896  assert(_oops_do_mark_nmethods != NULL, "must not call oops_do_marking_epilogue twice in a row");
1897  nmethod* cur = _oops_do_mark_nmethods;
1898  while (cur != NMETHOD_SENTINEL) {
1899    assert(cur != NULL, "not NULL-terminated");
1900    nmethod* next = cur->_oops_do_mark_link;
1901    cur->_oops_do_mark_link = NULL;
1902    cur->fix_oop_relocations();
1903    NOT_PRODUCT(if (TraceScavenge)  cur->print_on(tty, "oops_do, unmark"));
1904    cur = next;
1905  }
1906  void* required = _oops_do_mark_nmethods;
1907  void* observed = Atomic::cmpxchg_ptr(NULL, &_oops_do_mark_nmethods, required);
1908  guarantee(observed == required, "no races in this sequential code");
1909  NOT_PRODUCT(if (TraceScavenge)  tty->print_cr("oops_do_marking_epilogue]"));
1910}
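
// Typical pairing (sketch): a GC brackets its code-root scan with the
// prologue/epilogue so that each nmethod's oops are visited exactly once
// even with parallel workers:
//
//   nmethod::oops_do_marking_prologue();
//   // each worker: if (!nm->test_set_oops_do_mark()) nm->oops_do(cl);
//   nmethod::oops_do_marking_epilogue();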
1911
1912class DetectScavengeRoot: public OopClosure {
1913  bool     _detected_scavenge_root;
1914public:
1915  DetectScavengeRoot() : _detected_scavenge_root(false)
1916  { NOT_PRODUCT(_print_nm = NULL); }
1917  bool detected_scavenge_root() { return _detected_scavenge_root; }
1918  virtual void do_oop(oop* p) {
1919    if ((*p) != NULL && (*p)->is_scavengable()) {
1920      NOT_PRODUCT(maybe_print(p));
1921      _detected_scavenge_root = true;
1922    }
1923  }
1924  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
1925
1926#ifndef PRODUCT
1927  nmethod* _print_nm;
1928  void maybe_print(oop* p) {
1929    if (_print_nm == NULL)  return;
1930    if (!_detected_scavenge_root)  _print_nm->print_on(tty, "new scavenge root");
1931    tty->print_cr(PTR_FORMAT "[offset=%d] detected scavengable oop " PTR_FORMAT " (found at " PTR_FORMAT ")",
1932                  _print_nm, (int)((intptr_t)p - (intptr_t)_print_nm),
1933                  (void *)(*p), (intptr_t)p);
1934    (*p)->print();
1935  }
1936#endif //PRODUCT
1937};
1938
1939bool nmethod::detect_scavenge_root_oops() {
1940  DetectScavengeRoot detect_scavenge_root;
1941  NOT_PRODUCT(if (TraceScavenge)  detect_scavenge_root._print_nm = this);
1942  oops_do(&detect_scavenge_root);
1943  return detect_scavenge_root.detected_scavenge_root();
1944}
1945
1946// Method that knows how to preserve outgoing arguments at a call. This method must be
1947// called with a frame corresponding to a Java invoke.
1948void nmethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
1949#ifndef SHARK
1950  if (!method()->is_native()) {
1951    SimpleScopeDesc ssd(this, fr.pc());
1952    Bytecode_invoke call(ssd.method(), ssd.bci());
1953    bool has_receiver = call.has_receiver();
1954    bool has_appendix = call.has_appendix();
1955    Symbol* signature = call.signature();
1956    fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
1957  }
1958#endif // !SHARK
1959}
1960
1961
1962oop nmethod::embeddedOop_at(u_char* p) {
1963  RelocIterator iter(this, p, p + 1);
1964  while (iter.next())
1965    if (iter.type() == relocInfo::oop_type) {
1966      return iter.oop_reloc()->oop_value();
1967    }
1968  return NULL;
1969}
1970
1971
1972inline bool includes(void* p, void* from, void* to) {
1973  return from <= p && p < to;
1974}
1975
1976
1977void nmethod::copy_scopes_pcs(PcDesc* pcs, int count) {
1978  assert(count >= 2, "must have at least the two sentinel values");
1979
1980#ifdef ASSERT
1981  // must be sorted and unique; we do a binary search in find_pc_desc()
1982  int prev_offset = pcs[0].pc_offset();
1983  assert(prev_offset == PcDesc::lower_offset_limit,
1984         "must start with a sentinel");
1985  for (int i = 1; i < count; i++) {
1986    int this_offset = pcs[i].pc_offset();
1987    assert(this_offset > prev_offset, "offsets must be sorted");
1988    prev_offset = this_offset;
1989  }
1990  assert(prev_offset == PcDesc::upper_offset_limit,
1991         "must end with a sentinel");
1992#endif //ASSERT
1993
1994  // Search for MethodHandle invokes and tag the nmethod.
1995  for (int i = 0; i < count; i++) {
1996    if (pcs[i].is_method_handle_invoke()) {
1997      set_has_method_handle_invokes(true);
1998      break;
1999    }
2000  }
2001  assert(has_method_handle_invokes() == (_deoptimize_mh_offset != -1), "must have deopt mh handler");
2002
2003  int size = count * sizeof(PcDesc);
2004  assert(scopes_pcs_size() >= size, "oob");
2005  memcpy(scopes_pcs_begin(), pcs, size);
2006
2007  // Adjust the final sentinel downward.
2008  PcDesc* last_pc = &scopes_pcs_begin()[count-1];
2009  assert(last_pc->pc_offset() == PcDesc::upper_offset_limit, "sanity");
2010  last_pc->set_pc_offset(content_size() + 1);
2011  for (; last_pc + 1 < scopes_pcs_end(); last_pc += 1) {
2012    // Fill any rounding gaps with copies of the last record.
2013    last_pc[1] = last_pc[0];
2014  }
2015  // The following assert could fail if sizeof(PcDesc) is not
2016  // an integral multiple of oopSize (the rounding term).
2017  // If it fails, change the logic to always allocate a multiple
2018  // of sizeof(PcDesc), and fill unused words with copies of *last_pc.
2019  assert(last_pc + 1 == scopes_pcs_end(), "must match exactly");
2020}
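
// Resulting layout (illustrative, for count == 4 incoming records):
//
//   pcs[0]  pc_offset == PcDesc::lower_offset_limit   (leading sentinel)
//   pcs[1]  first real record; offsets strictly increasing
//   pcs[2]  last real record
//   pcs[3]  pc_offset rewritten to content_size() + 1  (trailing sentinel),
//           then copied into any rounding gap up to scopes_pcs_end()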
2021
2022void nmethod::copy_scopes_data(u_char* buffer, int size) {
2023  assert(scopes_data_size() >= size, "oob");
2024  memcpy(scopes_data_begin(), buffer, size);
2025}
2026
2027
2028#ifdef ASSERT
2029static PcDesc* linear_search(nmethod* nm, int pc_offset, bool approximate) {
2030  PcDesc* lower = nm->scopes_pcs_begin();
2031  PcDesc* upper = nm->scopes_pcs_end();
2032  lower += 1; // exclude initial sentinel
2033  PcDesc* res = NULL;
2034  for (PcDesc* p = lower; p < upper; p++) {
2035    NOT_PRODUCT(--nmethod_stats.pc_desc_tests);  // don't count this call to match_desc
2036    if (match_desc(p, pc_offset, approximate)) {
2037      if (res == NULL)
2038        res = p;
2039      else
2040        res = (PcDesc*) badAddress;
2041    }
2042  }
2043  return res;
2044}
2045#endif
2046
2047
2048// Finds a PcDesc with real-pc equal to "pc"
2049PcDesc* nmethod::find_pc_desc_internal(address pc, bool approximate) {
2050  address base_address = code_begin();
2051  if ((pc < base_address) ||
2052      (pc - base_address) >= (ptrdiff_t) PcDesc::upper_offset_limit) {
2053    return NULL;  // PC is wildly out of range
2054  }
2055  int pc_offset = (int) (pc - base_address);
2056
2057  // Check whether the PcDesc cache contains the desired PcDesc
2058  // (this has an almost 100% hit rate).
2059  PcDesc* res = _pc_desc_cache.find_pc_desc(pc_offset, approximate);
2060  if (res != NULL) {
2061    assert(res == linear_search(this, pc_offset, approximate), "cache ok");
2062    return res;
2063  }
2064
2065  // Fallback algorithm: quasi-linear search for the PcDesc
2066  // Find the last pc_offset less than the given offset.
2067  // The successor must be the required match, if there is a match at all.
2068  // (Use a fixed radix to avoid expensive affine pointer arithmetic.)
2069  PcDesc* lower = scopes_pcs_begin();
2070  PcDesc* upper = scopes_pcs_end();
2071  upper -= 1; // exclude final sentinel
2072  if (lower >= upper)  return NULL;  // native method; no PcDescs at all
2073
2074#define assert_LU_OK \
2075  /* invariant on lower..upper during the following search: */ \
2076  assert(lower->pc_offset() <  pc_offset, "sanity"); \
2077  assert(upper->pc_offset() >= pc_offset, "sanity")
2078  assert_LU_OK;
2079
2080  // Use the last successful return as a split point.
2081  PcDesc* mid = _pc_desc_cache.last_pc_desc();
2082  NOT_PRODUCT(++nmethod_stats.pc_desc_searches);
2083  if (mid->pc_offset() < pc_offset) {
2084    lower = mid;
2085  } else {
2086    upper = mid;
2087  }
2088
2089  // Take giant steps at first (4096, then 256, then 16, then 1)
2090  const int LOG2_RADIX = 4 /*smaller steps in debug mode:*/ debug_only(-1);
2091  const int RADIX = (1 << LOG2_RADIX);
2092  for (int step = (1 << (LOG2_RADIX*3)); step > 1; step >>= LOG2_RADIX) {
2093    while ((mid = lower + step) < upper) {
2094      assert_LU_OK;
2095      NOT_PRODUCT(++nmethod_stats.pc_desc_searches);
2096      if (mid->pc_offset() < pc_offset) {
2097        lower = mid;
2098      } else {
2099        upper = mid;
2100        break;
2101      }
2102    }
2103    assert_LU_OK;
2104  }
2105
2106  // Sneak up on the value with a linear search of length ~16.
2107  while (true) {
2108    assert_LU_OK;
2109    mid = lower + 1;
2110    NOT_PRODUCT(++nmethod_stats.pc_desc_searches);
2111    if (mid->pc_offset() < pc_offset) {
2112      lower = mid;
2113    } else {
2114      upper = mid;
2115      break;
2116    }
2117  }
2118#undef assert_LU_OK
2119
2120  if (match_desc(upper, pc_offset, approximate)) {
2121    assert(upper == linear_search(this, pc_offset, approximate), "search ok");
2122    _pc_desc_cache.add_pc_desc(upper);
2123    return upper;
2124  } else {
2125    assert(NULL == linear_search(this, pc_offset, approximate), "search ok");
2126    return NULL;
2127  }
2128}
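
// Worked example of the radix schedule above: with LOG2_RADIX == 4 in
// product builds the outer loop tries steps of 1 << 12 == 4096, then 256,
// then 16, before the final loop advances one PcDesc at a time; in debug
// builds LOG2_RADIX drops to 3, giving steps of 512, 64 and 8, so the
// asserts are exercised over more iterations.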
2129
2130
2131void nmethod::check_all_dependencies(DepChange& changes) {
2132  // Checked dependencies are allocated into this ResourceMark
2133  ResourceMark rm;
2134
2135  // Turn off dependency tracing while actually testing dependencies.
2136  NOT_PRODUCT( FlagSetting fs(TraceDependencies, false) );
2137
2138  // 'dep_signature_buffers' caches already checked dependencies.
2139  DependencySignatureBuffer dep_signature_buffers;
2140
2141  // Iterate over live nmethods and check dependencies of all nmethods that are not
2142  // marked for deoptimization. A particular dependency is only checked once.
2143  for (nmethod* nm = CodeCache::alive_nmethod(CodeCache::first()); nm != NULL; nm = CodeCache::alive_nmethod(CodeCache::next(nm))) {
2144    if (!nm->is_marked_for_deoptimization()) {
2145      for (Dependencies::DepStream deps(nm); deps.next(); ) {
2146        // Construct abstraction of a dependency.
2147        const DependencySignature* current_sig = new DependencySignature(deps);
2148        // Determine if 'deps' is already checked. If it is not checked,
2149        // 'add_if_missing()' adds the dependency signature and returns
2150        // false.
2151        if (!dep_signature_buffers.add_if_missing(*current_sig)) {
2152          if (deps.check_dependency() != NULL) {
2153            // Dependency checking failed. Print out information about the failed
2154            // dependency and finally fail with an assert. We can fail here, since
2155            // dependency checking is never done in a product build.
2156            ResourceMark rm;
2157            changes.print();
2158            nm->print();
2159            nm->print_dependencies();
2160            assert(false, "Should have been marked for deoptimization");
2161          }
2162        }
2163      }
2164    }
2165  }
2166}
2167
2168bool nmethod::check_dependency_on(DepChange& changes) {
2169  // What has happened:
2170  // 1) a new class dependee has been added
2171  // 2) dependee and all its super classes have been marked
2172  bool found_check = false;  // set true if we are upset
2173  for (Dependencies::DepStream deps(this); deps.next(); ) {
2174    // Evaluate only relevant dependencies.
2175    if (deps.spot_check_dependency_at(changes) != NULL) {
2176      found_check = true;
2177      NOT_DEBUG(break);
2178    }
2179  }
2180  return found_check;
2181}
2182
2183bool nmethod::is_evol_dependent_on(Klass* dependee) {
2184  InstanceKlass *dependee_ik = InstanceKlass::cast(dependee);
2185  Array<Method*>* dependee_methods = dependee_ik->methods();
2186  for (Dependencies::DepStream deps(this); deps.next(); ) {
2187    if (deps.type() == Dependencies::evol_method) {
2188      Method* method = deps.method_argument(0);
2189      for (int j = 0; j < dependee_methods->length(); j++) {
2190        if (dependee_methods->at(j) == method) {
2191          // RC_TRACE macro has an embedded ResourceMark
2192          RC_TRACE(0x01000000,
2193            ("Found evol dependency of nmethod %s.%s(%s) compile_id=%d on method %s.%s(%s)",
2194            _method->method_holder()->external_name(),
2195            _method->name()->as_C_string(),
2196            _method->signature()->as_C_string(), compile_id(),
2197            method->method_holder()->external_name(),
2198            method->name()->as_C_string(),
2199            method->signature()->as_C_string()));
2200          if (TraceDependencies || LogCompilation)
2201            deps.log_dependency(dependee);
2202          return true;
2203        }
2204      }
2205    }
2206  }
2207  return false;
2208}
2209
2210// Called from mark_for_deoptimization, when dependee is invalidated.
2211bool nmethod::is_dependent_on_method(Method* dependee) {
2212  for (Dependencies::DepStream deps(this); deps.next(); ) {
2213    if (deps.type() != Dependencies::evol_method)
2214      continue;
2215    Method* method = deps.method_argument(0);
2216    if (method == dependee) return true;
2217  }
2218  return false;
2219}
2220
2221
2222bool nmethod::is_patchable_at(address instr_addr) {
2223  assert(insts_contains(instr_addr), "wrong nmethod used");
2224  if (is_zombie()) {
2225    // a zombie may never be patched
2226    return false;
2227  }
2228  return true;
2229}
2230
2231
2232address nmethod::continuation_for_implicit_exception(address pc) {
2233  // Exception happened outside inline-cache check code => we are inside
2234  // an active nmethod => use the implicit exception table to find the continuation address.
2235  int exception_offset = pc - code_begin();
2236  int cont_offset = ImplicitExceptionTable(this).at( exception_offset );
2237#ifdef ASSERT
2238  if (cont_offset == 0) {
2239    Thread* thread = ThreadLocalStorage::get_thread_slow();
2240    ResetNoHandleMark rnm; // Might be called from LEAF/QUICK ENTRY
2241    HandleMark hm(thread);
2242    ResourceMark rm(thread);
2243    CodeBlob* cb = CodeCache::find_blob(pc);
2244    assert(cb != NULL && cb == this, "");
2245    tty->print_cr("implicit exception happened at " INTPTR_FORMAT, pc);
2246    print();
2247    method()->print_codes();
2248    print_code();
2249    print_pcs();
2250  }
2251#endif
2252  if (cont_offset == 0) {
2253    // Let the normal error handling report the exception
2254    return NULL;
2255  }
2256  return code_begin() + cont_offset;
2257}
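
// Usage sketch (hypothetical caller): when compiled code takes an implicit
// null-check or divide-by-zero trap at `pc`, the signal handler can ask the
// owning nmethod where to resume:
//
//   address cont = nm->continuation_for_implicit_exception(pc);
//   if (cont != NULL) {
//     // resume at `cont` with the exception pending
//   } else {
//     // fall back to normal error reporting
//   }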
2258
2259
2260
2261void nmethod_init() {
2262  // make sure you didn't forget to adjust the filler fields
2263  assert(sizeof(nmethod) % oopSize == 0, "nmethod size must be multiple of a word");
2264}
2265
2266
2267//-------------------------------------------------------------------------------------------
2268
2269
2270// QQQ might we make this work from a frame??
2271nmethodLocker::nmethodLocker(address pc) {
2272  CodeBlob* cb = CodeCache::find_blob(pc);
2273  guarantee(cb != NULL && cb->is_nmethod(), "bad pc for an nmethod found");
2274  _nm = (nmethod*)cb;
2275  lock_nmethod(_nm);
2276}
2277
2278// Only JvmtiDeferredEvent::compiled_method_unload_event()
2279// should pass zombie_ok == true.
2280void nmethodLocker::lock_nmethod(nmethod* nm, bool zombie_ok) {
2281  if (nm == NULL)  return;
2282  Atomic::inc(&nm->_lock_count);
2283  guarantee(zombie_ok || !nm->is_zombie(), "cannot lock a zombie method");
2284}
2285
2286void nmethodLocker::unlock_nmethod(nmethod* nm) {
2287  if (nm == NULL)  return;
2288  Atomic::dec(&nm->_lock_count);
2289  guarantee(nm->_lock_count >= 0, "unmatched nmethod lock/unlock");
2290}
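
// Minimal usage sketch: nmethodLocker is an RAII guard, so a caller that
// must keep the compiled code at `pc` alive across a scope can write:
//
//   {
//     nmethodLocker nml(pc);   // bumps the owning nmethod's _lock_count
//     // ... the nmethod cannot be flushed while locked ...
//   }                          // the destructor calls unlock_nmethod()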
2291
2292
2293// -----------------------------------------------------------------------------
2294// nmethod::get_deopt_original_pc
2295//
2296// Return the original PC for the given PC if:
2297// (a) the given PC belongs to an nmethod, and
2298// (b) it is a deopt PC
2299address nmethod::get_deopt_original_pc(const frame* fr) {
2300  if (fr->cb() == NULL)  return NULL;
2301
2302  nmethod* nm = fr->cb()->as_nmethod_or_null();
2303  if (nm != NULL && nm->is_deopt_pc(fr->pc()))
2304    return nm->get_original_pc(fr);
2305
2306  return NULL;
2307}
2308
2309
2310// -----------------------------------------------------------------------------
2311// MethodHandle
2312
2313bool nmethod::is_method_handle_return(address return_pc) {
2314  if (!has_method_handle_invokes())  return false;
2315  PcDesc* pd = pc_desc_at(return_pc);
2316  if (pd == NULL)
2317    return false;
2318  return pd->is_method_handle_invoke();
2319}
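
// Usage sketch (hypothetical caller): stack walkers consult this to
// recognize frames extended by a MethodHandle call site, e.g.
//
//   if (nm->is_method_handle_return(return_pc)) {
//     // the frame may be extended; adjust the sender sp accordingly
//   }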
2320
2321
2322// -----------------------------------------------------------------------------
2323// Verification
2324
2325class VerifyOopsClosure: public OopClosure {
2326  nmethod* _nm;
2327  bool     _ok;
2328public:
2329  VerifyOopsClosure(nmethod* nm) : _nm(nm), _ok(true) { }
2330  bool ok() { return _ok; }
2331  virtual void do_oop(oop* p) {
2332    if ((*p) == NULL || (*p)->is_oop())  return;
2333    if (_ok) {
2334      _nm->print_nmethod(true);
2335      _ok = false;
2336    }
2337    tty->print_cr("*** non-oop " PTR_FORMAT " found at " PTR_FORMAT " (offset %d)",
2338                  (void *)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));
2339  }
2340  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
2341};
2342
2343void nmethod::verify() {
2344
2345  // Hmm. OSR methods can be deopted but not marked as zombie or not_entrant,
2346  // which seems odd.
2347
2348  if (is_zombie() || is_not_entrant())
2349    return;
2350
2351  // Make sure all the entry points are correctly aligned for patching.
2352  NativeJump::check_verified_entry_alignment(entry_point(), verified_entry_point());
2353
2354  // assert(method()->is_oop(), "must be valid");
2355
2356  ResourceMark rm;
2357
2358  if (!CodeCache::contains(this)) {
2359    fatal(err_msg("nmethod at " INTPTR_FORMAT " not in zone", this));
2360  }
2361
2362  if (is_native_method())
2363    return;
2364
2365  nmethod* nm = CodeCache::find_nmethod(verified_entry_point());
2366  if (nm != this) {
2367    fatal(err_msg("findNMethod did not find this nmethod (" INTPTR_FORMAT ")",
2368                  this));
2369  }
2370
2371  for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
2372    if (!p->verify(this)) {
2373      tty->print_cr("\t\tin nmethod at " INTPTR_FORMAT " (pcs)", this);
2374    }
2375  }
2376
2377  VerifyOopsClosure voc(this);
2378  oops_do(&voc);
2379  assert(voc.ok(), "embedded oops must be OK");
2380  verify_scavenge_root_oops();
2381
2382  verify_scopes();
2383}
2384
2385
2386void nmethod::verify_interrupt_point(address call_site) {
2387  // Verify IC only when nmethod installation is finished.
2388  bool is_installed = (method()->code() == this) // nmethod is in state 'in_use' and installed
2389                      || !this->is_in_use();     // nmethod is installed, but not in 'in_use' state
2390  if (is_installed) {
2391    Thread *cur = Thread::current();
2392    if (CompiledIC_lock->owner() == cur ||
2393        ((cur->is_VM_thread() || cur->is_ConcurrentGC_thread()) &&
2394         SafepointSynchronize::is_at_safepoint())) {
2395      CompiledIC_at(this, call_site);
2396      CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
2397    } else {
2398      MutexLocker ml_verify (CompiledIC_lock);
2399      CompiledIC_at(this, call_site);
2400    }
2401  }
2402
2403  PcDesc* pd = pc_desc_at(nativeCall_at(call_site)->return_address());
2404  assert(pd != NULL, "PcDesc must exist");
2405  for (ScopeDesc* sd = new ScopeDesc(this, pd->scope_decode_offset(),
2406                                     pd->obj_decode_offset(), pd->should_reexecute(),
2407                                     pd->return_oop());
2408       !sd->is_top(); sd = sd->sender()) {
2409    sd->verify();
2410  }
2411}
2412
2413void nmethod::verify_scopes() {
2414  if (!method()) return;             // Runtime stubs have no scope.
2415  if (method()->is_native()) return; // Ignore stub methods.
2416  // Iterate through all interrupt points
2417  // and verify that the debug information is valid.
2418  RelocIterator iter((nmethod*)this);
2419  while (iter.next()) {
2420    address stub = NULL;
2421    switch (iter.type()) {
2422      case relocInfo::virtual_call_type:
2423        verify_interrupt_point(iter.addr());
2424        break;
2425      case relocInfo::opt_virtual_call_type:
2426        stub = iter.opt_virtual_call_reloc()->static_stub();
2427        verify_interrupt_point(iter.addr());
2428        break;
2429      case relocInfo::static_call_type:
2430        stub = iter.static_call_reloc()->static_stub();
2431        //verify_interrupt_point(iter.addr());
2432        break;
2433      case relocInfo::runtime_call_type:
2434        address destination = iter.reloc()->value();
2435        // Right now there is no way to find out which entries support
2436        // an interrupt point.  It would be nice if we had this
2437        // information in a table.
2438        break;
2439    }
2440    assert(stub == NULL || stub_contains(stub), "static call stub outside stub section");
2441  }
2442}
2443
2444
2445// -----------------------------------------------------------------------------
2446// Non-product code
2447#ifndef PRODUCT
2448
2449class DebugScavengeRoot: public OopClosure {
2450  nmethod* _nm;
2451  bool     _ok;
2452public:
2453  DebugScavengeRoot(nmethod* nm) : _nm(nm), _ok(true) { }
2454  bool ok() { return _ok; }
2455  virtual void do_oop(oop* p) {
2456    if ((*p) == NULL || !(*p)->is_scavengable())  return;
2457    if (_ok) {
2458      _nm->print_nmethod(true);
2459      _ok = false;
2460    }
2461    tty->print_cr("*** scavengable oop " PTR_FORMAT " found at " PTR_FORMAT " (offset %d)",
2462                  (void *)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));
2463    (*p)->print();
2464  }
2465  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
2466};
2467
2468void nmethod::verify_scavenge_root_oops() {
2469  if (!on_scavenge_root_list()) {
2470    // Actually look inside, to verify the claim that it's clean.
2471    DebugScavengeRoot debug_scavenge_root(this);
2472    oops_do(&debug_scavenge_root);
2473    if (!debug_scavenge_root.ok())
2474      fatal("found an unadvertised bad scavengable oop in the code cache");
2475  }
2476  assert(scavenge_root_not_marked(), "");
2477}
2478
2479#endif // PRODUCT
2480
2481// Printing operations
2482
2483void nmethod::print() const {
2484  ResourceMark rm;
2485  ttyLocker ttyl;   // keep the following output all in one block
2486
2487  tty->print("Compiled method ");
2488
2489  if (is_compiled_by_c1()) {
2490    tty->print("(c1) ");
2491  } else if (is_compiled_by_c2()) {
2492    tty->print("(c2) ");
2493  } else if (is_compiled_by_shark()) {
2494    tty->print("(shark) ");
2495  } else {
2496    tty->print("(nm) ");
2497  }
2498
2499  print_on(tty, NULL);
2500
2501  if (WizardMode) {
2502    tty->print("((nmethod*) "INTPTR_FORMAT ") ", this);
2503    tty->print(" for method " INTPTR_FORMAT , (address)method());
2504    tty->print(" { ");
2505    if (is_in_use())      tty->print("in_use ");
2506    if (is_not_entrant()) tty->print("not_entrant ");
2507    if (is_zombie())      tty->print("zombie ");
2508    if (is_unloaded())    tty->print("unloaded ");
2509    if (on_scavenge_root_list())  tty->print("scavenge_root ");
2510    tty->print_cr("}:");
2511  }
2512  if (size              () > 0) tty->print_cr(" total in heap  [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2513                                              (address)this,
2514                                              (address)this + size(),
2515                                              size());
2516  if (relocation_size   () > 0) tty->print_cr(" relocation     [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2517                                              relocation_begin(),
2518                                              relocation_end(),
2519                                              relocation_size());
2520  if (consts_size       () > 0) tty->print_cr(" constants      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2521                                              consts_begin(),
2522                                              consts_end(),
2523                                              consts_size());
2524  if (insts_size        () > 0) tty->print_cr(" main code      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2525                                              insts_begin(),
2526                                              insts_end(),
2527                                              insts_size());
2528  if (stub_size         () > 0) tty->print_cr(" stub code      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2529                                              stub_begin(),
2530                                              stub_end(),
2531                                              stub_size());
2532  if (oops_size         () > 0) tty->print_cr(" oops           [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2533                                              oops_begin(),
2534                                              oops_end(),
2535                                              oops_size());
2536  if (metadata_size      () > 0) tty->print_cr(" metadata       [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2537                                              metadata_begin(),
2538                                              metadata_end(),
2539                                              metadata_size());
2540  if (scopes_data_size  () > 0) tty->print_cr(" scopes data    [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2541                                              scopes_data_begin(),
2542                                              scopes_data_end(),
2543                                              scopes_data_size());
2544  if (scopes_pcs_size   () > 0) tty->print_cr(" scopes pcs     [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2545                                              scopes_pcs_begin(),
2546                                              scopes_pcs_end(),
2547                                              scopes_pcs_size());
2548  if (dependencies_size () > 0) tty->print_cr(" dependencies   [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2549                                              dependencies_begin(),
2550                                              dependencies_end(),
2551                                              dependencies_size());
2552  if (handler_table_size() > 0) tty->print_cr(" handler table  [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2553                                              handler_table_begin(),
2554                                              handler_table_end(),
2555                                              handler_table_size());
2556  if (nul_chk_table_size() > 0) tty->print_cr(" nul chk table  [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2557                                              nul_chk_table_begin(),
2558                                              nul_chk_table_end(),
2559                                              nul_chk_table_size());
2560}
2561
2562void nmethod::print_code() {
2563  HandleMark hm;
2564  ResourceMark m;
2565  Disassembler::decode(this);
2566}
2567
2568
2569#ifndef PRODUCT
2570
2571void nmethod::print_scopes() {
2572  // For each PcDesc with debug information, decode and print its ScopeDesc.
2573  ResourceMark rm;
2574  for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
2575    if (p->scope_decode_offset() == DebugInformationRecorder::serialized_null)
2576      continue;
2577
2578    ScopeDesc* sd = scope_desc_at(p->real_pc(this));
2579    sd->print_on(tty, p);
2580  }
2581}
2582
2583void nmethod::print_dependencies() {
2584  ResourceMark rm;
2585  ttyLocker ttyl;   // keep the following output all in one block
2586  tty->print_cr("Dependencies:");
2587  for (Dependencies::DepStream deps(this); deps.next(); ) {
2588    deps.print_dependency();
2589    Klass* ctxk = deps.context_type();
2590    if (ctxk != NULL) {
2591      if (ctxk->oop_is_instance() && ((InstanceKlass*)ctxk)->is_dependent_nmethod(this)) {
2592        tty->print_cr("   [nmethod<=klass]%s", ctxk->external_name());
2593      }
2594    }
2595    deps.log_dependency();  // put it into the xml log also
2596  }
2597}
2598
2599
2600void nmethod::print_relocations() {
2601  ResourceMark m;       // in case methods get printed via the debugger
2602  tty->print_cr("relocations:");
2603  RelocIterator iter(this);
2604  iter.print();
2605  if (UseRelocIndex) {
2606    jint* index_end   = (jint*)relocation_end() - 1;
2607    jint  index_size  = *index_end;
2608    jint* index_start = (jint*)( (address)index_end - index_size );
2609    tty->print_cr("    index @" INTPTR_FORMAT ": index_size=%d", index_start, index_size);
2610    if (index_size > 0) {
2611      jint* ip;
2612      for (ip = index_start; ip+2 <= index_end; ip += 2)
2613        tty->print_cr("  (%d %d) addr=" INTPTR_FORMAT " @" INTPTR_FORMAT,
2614                      ip[0],
2615                      ip[1],
2616                      header_end()+ip[0],
2617                      relocation_begin()-1+ip[1]);
2618      for (; ip < index_end; ip++)
2619        tty->print_cr("  (%d ?)", ip[0]);
2620      tty->print_cr("          @" INTPTR_FORMAT ": index_size=%d", ip, *ip);
2621      ip++;
2622      tty->print_cr("reloc_end @" INTPTR_FORMAT ":", ip);
2623    }
2624  }
2625}
2626
2627
2628void nmethod::print_pcs() {
2629  ResourceMark m;       // in case methods get printed via debugger
2630  tty->print_cr("pc-bytecode offsets:");
2631  for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
2632    p->print(this);
2633  }
2634}
2635
2636#endif // PRODUCT
2637
2638const char* nmethod::reloc_string_for(u_char* begin, u_char* end) {
2639  RelocIterator iter(this, begin, end);
2640  bool have_one = false;
2641  while (iter.next()) {
2642    have_one = true;
2643    switch (iter.type()) {
2644        case relocInfo::none:                  return "no_reloc";
2645        case relocInfo::oop_type: {
2646          stringStream st;
2647          oop_Relocation* r = iter.oop_reloc();
2648          oop obj = r->oop_value();
2649          st.print("oop(");
2650          if (obj == NULL) st.print("NULL");
2651          else obj->print_value_on(&st);
2652          st.print(")");
2653          return st.as_string();
2654        }
2655        case relocInfo::metadata_type: {
2656          stringStream st;
2657          metadata_Relocation* r = iter.metadata_reloc();
2658          Metadata* obj = r->metadata_value();
2659          st.print("metadata(");
2660          if (obj == NULL) st.print("NULL");
2661          else obj->print_value_on(&st);
2662          st.print(")");
2663          return st.as_string();
2664        }
2665        case relocInfo::virtual_call_type:     return "virtual_call";
2666        case relocInfo::opt_virtual_call_type: return "optimized virtual_call";
2667        case relocInfo::static_call_type:      return "static_call";
2668        case relocInfo::static_stub_type:      return "static_stub";
2669        case relocInfo::runtime_call_type:     return "runtime_call";
2670        case relocInfo::external_word_type:    return "external_word";
2671        case relocInfo::internal_word_type:    return "internal_word";
2672        case relocInfo::section_word_type:     return "section_word";
2673        case relocInfo::poll_type:             return "poll";
2674        case relocInfo::poll_return_type:      return "poll_return";
2675        case relocInfo::type_mask:             return "type_bit_mask";
2676    }
2677  }
2678  return have_one ? "other" : NULL;
2679}
2680
2681// Return the last scope in (begin..end].
2682ScopeDesc* nmethod::scope_desc_in(address begin, address end) {
2683  PcDesc* p = pc_desc_near(begin+1);
2684  if (p != NULL && p->real_pc(this) <= end) {
2685    return new ScopeDesc(this, p->scope_decode_offset(),
2686                         p->obj_decode_offset(), p->should_reexecute(),
2687                         p->return_oop());
2688  }
2689  return NULL;
2690}
2691
2692void nmethod::print_nmethod_labels(outputStream* stream, address block_begin) const {
2693  if (block_begin == entry_point())             stream->print_cr("[Entry Point]");
2694  if (block_begin == verified_entry_point())    stream->print_cr("[Verified Entry Point]");
2695  if (block_begin == exception_begin())         stream->print_cr("[Exception Handler]");
2696  if (block_begin == stub_begin())              stream->print_cr("[Stub Code]");
2697  if (block_begin == deopt_handler_begin())     stream->print_cr("[Deopt Handler Code]");
2698
2699  if (has_method_handle_invokes())
2700    if (block_begin == deopt_mh_handler_begin())  stream->print_cr("[Deopt MH Handler Code]");
2701
2702  if (block_begin == consts_begin())            stream->print_cr("[Constants]");
2703
2704  if (block_begin == entry_point()) {
2705    methodHandle m = method();
2706    if (m.not_null()) {
2707      stream->print("  # ");
2708      m->print_value_on(stream);
2709      stream->cr();
2710    }
2711    if (m.not_null() && !is_osr_method()) {
2712      ResourceMark rm;
2713      int sizeargs = m->size_of_parameters();
2714      BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sizeargs);
2715      VMRegPair* regs   = NEW_RESOURCE_ARRAY(VMRegPair, sizeargs);
2716      {
2717        int sig_index = 0;
2718        if (!m->is_static())
2719          sig_bt[sig_index++] = T_OBJECT; // 'this'
2720        for (SignatureStream ss(m->signature()); !ss.at_return_type(); ss.next()) {
2721          BasicType t = ss.type();
2722          sig_bt[sig_index++] = t;
2723          if (type2size[t] == 2) {
2724            sig_bt[sig_index++] = T_VOID;
2725          } else {
2726            assert(type2size[t] == 1, "size is 1 or 2");
2727          }
2728        }
2729        assert(sig_index == sizeargs, "");
2730      }
2731      const char* spname = "sp"; // make arch-specific?
2732      intptr_t out_preserve = SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs, false);
2733      int stack_slot_offset = this->frame_size() * wordSize;
2734      int tab1 = 14, tab2 = 24;
2735      int sig_index = 0;
2736      int arg_index = (m->is_static() ? 0 : -1);
2737      bool did_old_sp = false;
2738      for (SignatureStream ss(m->signature()); !ss.at_return_type(); ) {
2739        bool at_this = (arg_index == -1);
2740        bool at_old_sp = false;
2741        BasicType t = (at_this ? T_OBJECT : ss.type());
2742        assert(t == sig_bt[sig_index], "sigs in sync");
2743        if (at_this)
2744          stream->print("  # this: ");
2745        else
2746          stream->print("  # parm%d: ", arg_index);
2747        stream->move_to(tab1);
2748        VMReg fst = regs[sig_index].first();
2749        VMReg snd = regs[sig_index].second();
2750        if (fst->is_reg()) {
2751          stream->print("%s", fst->name());
2752          if (snd->is_valid())  {
2753            stream->print(":%s", snd->name());
2754          }
2755        } else if (fst->is_stack()) {
2756          int offset = fst->reg2stack() * VMRegImpl::stack_slot_size + stack_slot_offset;
2757          if (offset == stack_slot_offset)  at_old_sp = true;
2758          stream->print("[%s+0x%x]", spname, offset);
2759        } else {
2760          stream->print("reg%d:%d??", (int)(intptr_t)fst, (int)(intptr_t)snd);
2761        }
2762        stream->print(" ");
2763        stream->move_to(tab2);
2764        stream->print("= ");
2765        if (at_this) {
2766          m->method_holder()->print_value_on(stream);
2767        } else {
2768          bool did_name = false;
2769          if (!at_this && ss.is_object()) {
2770            Symbol* name = ss.as_symbol_or_null();
2771            if (name != NULL) {
2772              name->print_value_on(stream);
2773              did_name = true;
2774            }
2775          }
2776          if (!did_name)
2777            stream->print("%s", type2name(t));
2778        }
2779        if (at_old_sp) {
2780          stream->print("  (%s of caller)", spname);
2781          did_old_sp = true;
2782        }
2783        stream->cr();
2784        sig_index += type2size[t];
2785        arg_index += 1;
2786        if (!at_this)  ss.next();
2787      }
2788      if (!did_old_sp) {
2789        stream->print("  # ");
2790        stream->move_to(tab1);
2791        stream->print("[%s+0x%x]", spname, stack_slot_offset);
2792        stream->print("  (%s of caller)", spname);
2793        stream->cr();
2794      }
2795    }
2796  }
2797}
2798
2799void nmethod::print_code_comment_on(outputStream* st, int column, u_char* begin, u_char* end) {
2800  // First, find an oopmap in (begin, end].
2801  // We use the odd half-closed interval so that oop maps and scope descs
2802  // which are tied to the byte after a call are printed with the call itself.
2803  address base = code_begin();
2804  OopMapSet* oms = oop_maps();
2805  if (oms != NULL) {
2806    for (int i = 0, imax = oms->size(); i < imax; i++) {
2807      OopMap* om = oms->at(i);
2808      address pc = base + om->offset();
2809      if (pc > begin) {
2810        if (pc <= end) {
2811          st->move_to(column);
2812          st->print("; ");
2813          om->print_on(st);
2814        }
2815        break;
2816      }
2817    }
2818  }
2819
2820  // Print any debug info present at this pc.
2821  ScopeDesc* sd  = scope_desc_in(begin, end);
2822  if (sd != NULL) {
2823    st->move_to(column);
2824    if (sd->bci() == SynchronizationEntryBCI) {
2825      st->print(";*synchronization entry");
2826    } else {
2827      if (sd->method() == NULL) {
2828        st->print("method is NULL");
2829      } else if (sd->method()->is_native()) {
2830        st->print("method is native");
2831      } else {
2832        Bytecodes::Code bc = sd->method()->java_code_at(sd->bci());
2833        st->print(";*%s", Bytecodes::name(bc));
2834        switch (bc) {
2835        case Bytecodes::_invokevirtual:
2836        case Bytecodes::_invokespecial:
2837        case Bytecodes::_invokestatic:
2838        case Bytecodes::_invokeinterface:
2839          {
2840            Bytecode_invoke invoke(sd->method(), sd->bci());
2841            st->print(" ");
2842            if (invoke.name() != NULL)
2843              invoke.name()->print_symbol_on(st);
2844            else
2845              st->print("<UNKNOWN>");
2846            break;
2847          }
2848        case Bytecodes::_getfield:
2849        case Bytecodes::_putfield:
2850        case Bytecodes::_getstatic:
2851        case Bytecodes::_putstatic:
2852          {
2853            Bytecode_field field(sd->method(), sd->bci());
2854            st->print(" ");
2855            if (field.name() != NULL)
2856              field.name()->print_symbol_on(st);
2857            else
2858              st->print("<UNKNOWN>");
2859          }
2860        }
2861      }
2862    }
2863
2864    // Print all scopes
2865    for (;sd != NULL; sd = sd->sender()) {
2866      st->move_to(column);
2867      st->print("; -");
2868      if (sd->method() == NULL) {
2869        st->print("method is NULL");
2870      } else {
2871        sd->method()->print_short_name(st);
2872        int lineno = sd->method()->line_number_from_bci(sd->bci());
2873        if (lineno != -1) {
2874          st->print("@%d (line %d)", sd->bci(), lineno);
2875        } else {
2876          st->print("@%d", sd->bci());
2877        }
2878      }
2879      st->cr();
2880    }
2881  }
2882
2883  // Print relocation information
2884  const char* str = reloc_string_for(begin, end);
2885  if (str != NULL) {
2886    if (sd != NULL) st->cr();
2887    st->move_to(column);
2888    st->print(";   {%s}", str);
2889  }
2890  int cont_offset = ImplicitExceptionTable(this).at(begin - code_begin());
2891  if (cont_offset != 0) {
2892    st->move_to(column);
2893    st->print("; implicit exception: dispatches to " INTPTR_FORMAT, code_begin() + cont_offset);
2894  }
2895
2896}
2897
2898#ifndef PRODUCT
2899
2900void nmethod::print_value_on(outputStream* st) const {
2901  st->print("nmethod");
2902  print_on(st, NULL);
2903}
2904
2905void nmethod::print_calls(outputStream* st) {
2906  RelocIterator iter(this);
2907  while (iter.next()) {
2908    switch (iter.type()) {
2909    case relocInfo::virtual_call_type:
2910    case relocInfo::opt_virtual_call_type: {
2911      VerifyMutexLocker mc(CompiledIC_lock);
2912      CompiledIC_at(iter.reloc())->print();
2913      break;
2914    }
2915    case relocInfo::static_call_type:
2916      st->print_cr("Static call at " INTPTR_FORMAT, iter.reloc()->addr());
2917      compiledStaticCall_at(iter.reloc())->print();
2918      break;
2919    }
2920  }
2921}
2922
2923void nmethod::print_handler_table() {
2924  ExceptionHandlerTable(this).print();
2925}
2926
2927void nmethod::print_nul_chk_table() {
2928  ImplicitExceptionTable(this).print(code_begin());
2929}
2930
2931void nmethod::print_statistics() {
2932  ttyLocker ttyl;
2933  if (xtty != NULL)  xtty->head("statistics type='nmethod'");
2934  nmethod_stats.print_native_nmethod_stats();
2935  nmethod_stats.print_nmethod_stats();
2936  DebugInformationRecorder::print_statistics();
2937  nmethod_stats.print_pc_stats();
2938  Dependencies::print_statistics();
2939  if (xtty != NULL)  xtty->tail("statistics");
2940}
2941
2942#endif // PRODUCT
2943