nmethod.cpp revision 196:d1605aabd0a1
1/*
2 * Copyright 1997-2008 Sun Microsystems, Inc.  All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
24
25# include "incls/_precompiled.incl"
26# include "incls/_nmethod.cpp.incl"
27
28#ifdef DTRACE_ENABLED
29
30// Only bother with this argument setup if dtrace is available
31
32HS_DTRACE_PROBE_DECL8(hotspot, compiled__method__load,
33  const char*, int, const char*, int, const char*, int, void*, size_t);
34
35HS_DTRACE_PROBE_DECL6(hotspot, compiled__method__unload,
36  char*, int, char*, int, char*, int);
37
38#define DTRACE_METHOD_UNLOAD_PROBE(method)                                \
39  {                                                                       \
40    methodOop m = (method);                                               \
41    if (m != NULL) {                                                      \
42      symbolOop klass_name = m->klass_name();                             \
43      symbolOop name = m->name();                                         \
44      symbolOop signature = m->signature();                               \
45      HS_DTRACE_PROBE6(hotspot, compiled__method__unload,                 \
46        klass_name->bytes(), klass_name->utf8_length(),                   \
47        name->bytes(), name->utf8_length(),                               \
48        signature->bytes(), signature->utf8_length());                    \
49    }                                                                     \
50  }
51
52#else //  ndef DTRACE_ENABLED
53
54#define DTRACE_METHOD_UNLOAD_PROBE(method)
55
56#endif
57
58bool nmethod::is_compiled_by_c1() const {
59  if (is_native_method()) return false;
60  assert(compiler() != NULL, "must be");
61  return compiler()->is_c1();
62}
63bool nmethod::is_compiled_by_c2() const {
64  if (is_native_method()) return false;
65  assert(compiler() != NULL, "must be");
66  return compiler()->is_c2();
67}
68
69
70
71//---------------------------------------------------------------------------------
72// NMethod statistics
73// They are printed under various flags, including:
74//   PrintC1Statistics, PrintOptoStatistics, LogVMOutput, and LogCompilation.
75// (In the latter two cases, they, like other stats, are printed to the log only.)
76
77#ifndef PRODUCT
78// These variables are put into one block to reduce relocations
79// and make it simpler to print from the debugger.
80static
81struct nmethod_stats_struct {
82  int nmethod_count;
83  int total_size;
84  int relocation_size;
85  int code_size;
86  int stub_size;
87  int consts_size;
88  int scopes_data_size;
89  int scopes_pcs_size;
90  int dependencies_size;
91  int handler_table_size;
92  int nul_chk_table_size;
93  int oops_size;
94
95  void note_nmethod(nmethod* nm) {
96    nmethod_count += 1;
97    total_size          += nm->size();
98    relocation_size     += nm->relocation_size();
99    code_size           += nm->code_size();
100    stub_size           += nm->stub_size();
101    consts_size         += nm->consts_size();
102    scopes_data_size    += nm->scopes_data_size();
103    scopes_pcs_size     += nm->scopes_pcs_size();
104    dependencies_size   += nm->dependencies_size();
105    handler_table_size  += nm->handler_table_size();
106    nul_chk_table_size  += nm->nul_chk_table_size();
107    oops_size += nm->oops_size();
108  }
109  void print_nmethod_stats() {
110    if (nmethod_count == 0)  return;
111    tty->print_cr("Statistics for %d bytecoded nmethods:", nmethod_count);
112    if (total_size != 0)          tty->print_cr(" total in heap  = %d", total_size);
113    if (relocation_size != 0)     tty->print_cr(" relocation     = %d", relocation_size);
114    if (code_size != 0)           tty->print_cr(" main code      = %d", code_size);
115    if (stub_size != 0)           tty->print_cr(" stub code      = %d", stub_size);
116    if (consts_size != 0)         tty->print_cr(" constants      = %d", consts_size);
117    if (scopes_data_size != 0)    tty->print_cr(" scopes data    = %d", scopes_data_size);
118    if (scopes_pcs_size != 0)     tty->print_cr(" scopes pcs     = %d", scopes_pcs_size);
119    if (dependencies_size != 0)   tty->print_cr(" dependencies   = %d", dependencies_size);
120    if (handler_table_size != 0)  tty->print_cr(" handler table  = %d", handler_table_size);
121    if (nul_chk_table_size != 0)  tty->print_cr(" nul chk table  = %d", nul_chk_table_size);
122    if (oops_size != 0)           tty->print_cr(" oops           = %d", oops_size);
123  }
124
125  int native_nmethod_count;
126  int native_total_size;
127  int native_relocation_size;
128  int native_code_size;
129  int native_oops_size;
130  void note_native_nmethod(nmethod* nm) {
131    native_nmethod_count += 1;
132    native_total_size       += nm->size();
133    native_relocation_size  += nm->relocation_size();
134    native_code_size        += nm->code_size();
135    native_oops_size        += nm->oops_size();
136  }
137  void print_native_nmethod_stats() {
138    if (native_nmethod_count == 0)  return;
139    tty->print_cr("Statistics for %d native nmethods:", native_nmethod_count);
140    if (native_total_size != 0)       tty->print_cr(" N. total size  = %d", native_total_size);
141    if (native_relocation_size != 0)  tty->print_cr(" N. relocation  = %d", native_relocation_size);
142    if (native_code_size != 0)        tty->print_cr(" N. main code   = %d", native_code_size);
143    if (native_oops_size != 0)        tty->print_cr(" N. oops        = %d", native_oops_size);
144  }
145
146  int pc_desc_resets;   // number of resets (= number of caches)
147  int pc_desc_queries;  // queries to nmethod::find_pc_desc
148  int pc_desc_approx;   // number of those which have approximate true
149  int pc_desc_repeats;  // number of _last_pc_desc hits
150  int pc_desc_hits;     // number of LRU cache hits
151  int pc_desc_tests;    // total number of PcDesc examinations
152  int pc_desc_searches; // total number of quasi-binary search steps
153  int pc_desc_adds;     // number of LRU cache insertions
154
155  void print_pc_stats() {
156    tty->print_cr("PcDesc Statistics:  %d queries, %.2f comparisons per query",
157                  pc_desc_queries,
158                  (double)(pc_desc_tests + pc_desc_searches)
159                  / pc_desc_queries);
160    tty->print_cr("  caches=%d queries=%d/%d, hits=%d+%d, tests=%d+%d, adds=%d",
161                  pc_desc_resets,
162                  pc_desc_queries, pc_desc_approx,
163                  pc_desc_repeats, pc_desc_hits,
164                  pc_desc_tests, pc_desc_searches, pc_desc_adds);
165  }
166} nmethod_stats;
167#endif //PRODUCT
168
169//---------------------------------------------------------------------------------
170
171
172// The _unwind_handler is a special marker address, which says that
173// for a given exception oop and address, the frame should be removed
174// as the tuple cannot be caught in the nmethod.
175address ExceptionCache::_unwind_handler = (address) -1;
176
177
178ExceptionCache::ExceptionCache(Handle exception, address pc, address handler) {
179  assert(pc != NULL, "Must be non null");
180  assert(exception.not_null(), "Must be non null");
181  assert(handler != NULL, "Must be non null");
182
183  _count = 0;
184  _exception_type = exception->klass();
185  _next = NULL;
186
187  add_address_and_handler(pc,handler);
188}
189
190
191address ExceptionCache::match(Handle exception, address pc) {
192  assert(pc != NULL,"Must be non null");
193  assert(exception.not_null(),"Must be non null");
194  if (exception->klass() == exception_type()) {
195    return (test_address(pc));
196  }
197
198  return NULL;
199}
200
201
202bool ExceptionCache::match_exception_with_space(Handle exception) {
203  assert(exception.not_null(),"Must be non null");
204  if (exception->klass() == exception_type() && count() < cache_size) {
205    return true;
206  }
207  return false;
208}
209
210
211address ExceptionCache::test_address(address addr) {
212  for (int i=0; i<count(); i++) {
213    if (pc_at(i) == addr) {
214      return handler_at(i);
215    }
216  }
217  return NULL;
218}
219
220
221bool ExceptionCache::add_address_and_handler(address addr, address handler) {
222  if (test_address(addr) == handler) return true;
223  if (count() < cache_size) {
224    set_pc_at(count(),addr);
225    set_handler_at(count(), handler);
226    increment_count();
227    return true;
228  }
229  return false;
230}
231
232
233// Private methods used to manipulate the exception cache directly.
236ExceptionCache* nmethod::exception_cache_entry_for_exception(Handle exception) {
237  ExceptionCache* ec = exception_cache();
238  while (ec != NULL) {
239    if (ec->match_exception_with_space(exception)) {
240      return ec;
241    }
242    ec = ec->next();
243  }
244  return NULL;
245}
246
247
248//-----------------------------------------------------------------------------
249
250
251// Helper used by both find_pc_desc methods.
252static inline bool match_desc(PcDesc* pc, int pc_offset, bool approximate) {
253  NOT_PRODUCT(++nmethod_stats.pc_desc_tests);
254  if (!approximate)
255    return pc->pc_offset() == pc_offset;
256  else
257    return (pc-1)->pc_offset() < pc_offset && pc_offset <= pc->pc_offset();
258}
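// A minimal worked example of the matching rule above (purely illustrative
// offsets, not taken from a real nmethod): with PcDescs at pc_offsets
// {0, 8, 24}, an exact query for pc_offset 16 finds nothing, while an
// approximate query for 16 matches the descriptor at offset 24, since
// 8 < 16 <= 24.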
259
260void PcDescCache::reset_to(PcDesc* initial_pc_desc) {
261  if (initial_pc_desc == NULL) {
262    _last_pc_desc = NULL;  // native method
263    return;
264  }
265  NOT_PRODUCT(++nmethod_stats.pc_desc_resets);
266  // reset the cache by filling it with benign (non-null) values
267  assert(initial_pc_desc->pc_offset() < 0, "must be sentinel");
268  _last_pc_desc = initial_pc_desc + 1;  // first valid one is after sentinel
269  for (int i = 0; i < cache_size; i++)
270    _pc_descs[i] = initial_pc_desc;
271}
272
273PcDesc* PcDescCache::find_pc_desc(int pc_offset, bool approximate) {
274  NOT_PRODUCT(++nmethod_stats.pc_desc_queries);
275  NOT_PRODUCT(if (approximate)  ++nmethod_stats.pc_desc_approx);
276
277  // In order to prevent race conditions, do not load cache elements
278  // repeatedly; use a local copy instead:
279  PcDesc* res;
280
281  // Step one:  Check the most recently returned value.
282  res = _last_pc_desc;
283  if (res == NULL)  return NULL;  // native method; no PcDescs at all
284  if (match_desc(res, pc_offset, approximate)) {
285    NOT_PRODUCT(++nmethod_stats.pc_desc_repeats);
286    return res;
287  }
288
289  // Step two:  Check the LRU cache.
290  for (int i = 0; i < cache_size; i++) {
291    res = _pc_descs[i];
292    if (res->pc_offset() < 0)  break;  // optimization: skip empty cache
293    if (match_desc(res, pc_offset, approximate)) {
294      NOT_PRODUCT(++nmethod_stats.pc_desc_hits);
295      _last_pc_desc = res;  // record this cache hit in case of repeat
296      return res;
297    }
298  }
299
300  // Report failure.
301  return NULL;
302}
303
304void PcDescCache::add_pc_desc(PcDesc* pc_desc) {
305  NOT_PRODUCT(++nmethod_stats.pc_desc_adds);
306  // Update the LRU cache by shifting pc_desc forward:
307  for (int i = 0; i < cache_size; i++)  {
308    PcDesc* next = _pc_descs[i];
309    _pc_descs[i] = pc_desc;
310    pc_desc = next;
311  }
312  // Note:  Do not update _last_pc_desc.  It fronts for the LRU cache.
313}
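// Illustration of the shift above, assuming a hypothetical cache_size of 4:
// adding E to a cache holding [A, B, C, D] yields [E, A, B, C]; the oldest
// entry D simply falls off the end, while _last_pc_desc is left untouched.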
314
315// adjust pcs_size so that it is a multiple of both oopSize and
316// sizeof(PcDesc) (assumes that if sizeof(PcDesc) is not a multiple
317// of oopSize, then 2*sizeof(PcDesc) is)
318static int  adjust_pcs_size(int pcs_size) {
319  int nsize = round_to(pcs_size,   oopSize);
320  if ((nsize % sizeof(PcDesc)) != 0) {
321    nsize = pcs_size + sizeof(PcDesc);
322  }
323  assert((nsize %  oopSize) == 0, "correct alignment");
324  return nsize;
325}
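// Worked example with purely illustrative sizes: assuming oopSize == 8 and
// sizeof(PcDesc) == 12, a pcs_size of 36 rounds up to 40, which is not a
// multiple of 12, so the function returns 36 + 12 == 48, a multiple of both.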
326
327//-----------------------------------------------------------------------------
328
329
330void nmethod::add_exception_cache_entry(ExceptionCache* new_entry) {
331  assert(ExceptionCache_lock->owned_by_self(),"Must hold the ExceptionCache_lock");
332  assert(new_entry != NULL,"Must be non null");
333  assert(new_entry->next() == NULL, "Must be null");
334
335  if (exception_cache() != NULL) {
336    new_entry->set_next(exception_cache());
337  }
338  set_exception_cache(new_entry);
339}
340
341void nmethod::remove_from_exception_cache(ExceptionCache* ec) {
342  ExceptionCache* prev = NULL;
343  ExceptionCache* curr = exception_cache();
344  assert(curr != NULL, "nothing to remove");
345  // find the previous and next entry of ec
346  while (curr != ec) {
347    prev = curr;
348    curr = curr->next();
349    assert(curr != NULL, "ExceptionCache not found");
350  }
351  // now: curr == ec
352  ExceptionCache* next = curr->next();
353  if (prev == NULL) {
354    set_exception_cache(next);
355  } else {
356    prev->set_next(next);
357  }
358  delete curr;
359}
360
361
362// Public methods for accessing the exception cache.
364address nmethod::handler_for_exception_and_pc(Handle exception, address pc) {
365  // We never grab a lock to read the exception cache, so we may
366  // have false negatives. This is okay, as it can only happen during
367  // the first few exception lookups for a given nmethod.
368  ExceptionCache* ec = exception_cache();
369  while (ec != NULL) {
370    address ret_val;
371    if ((ret_val = ec->match(exception,pc)) != NULL) {
372      return ret_val;
373    }
374    ec = ec->next();
375  }
376  return NULL;
377}
378
379
380void nmethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
381  // There are potential race conditions during exception cache updates, so we
382  // must own the ExceptionCache_lock before doing ANY modifications. Because
383  // we don't lock during reads, it is possible to have several threads attempt
384  // to update the cache with the same data. We need to check for already inserted
385  // copies of the current data before adding it.
386
387  MutexLocker ml(ExceptionCache_lock);
388  ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);
389
390  if (target_entry == NULL || !target_entry->add_address_and_handler(pc,handler)) {
391    target_entry = new ExceptionCache(exception,pc,handler);
392    add_exception_cache_entry(target_entry);
393  }
394}
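// A short sketch of the resulting protocol (summarizing the methods above, not
// additional behavior): readers call handler_for_exception_and_pc() without a
// lock and may miss a concurrently added entry, which only costs them a slow
// path; writers serialize on ExceptionCache_lock and first try to extend an
// existing entry that still has space before prepending a new ExceptionCache
// node to the head of the list.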
395
396
397//-------------end of code for ExceptionCache--------------
398
399
400void nmFlags::clear() {
401  assert(sizeof(nmFlags) == sizeof(int), "using more than one word for nmFlags");
402  *(jint*)this = 0;
403}
404
405int nmethod::total_size() const {
406  return
407    code_size()          +
408    stub_size()          +
409    consts_size()        +
410    scopes_data_size()   +
411    scopes_pcs_size()    +
412    handler_table_size() +
413    nul_chk_table_size();
414}
415
416const char* nmethod::compile_kind() const {
417  if (method() == NULL)    return "unloaded";
418  if (is_native_method())  return "c2n";
419  if (is_osr_method())     return "osr";
420  return NULL;
421}
422
423// %%% This variable is no longer used?
424int nmethod::_zombie_instruction_size = NativeJump::instruction_size;
425
426
427nmethod* nmethod::new_native_nmethod(methodHandle method,
428  CodeBuffer *code_buffer,
429  int vep_offset,
430  int frame_complete,
431  int frame_size,
432  ByteSize basic_lock_owner_sp_offset,
433  ByteSize basic_lock_sp_offset,
434  OopMapSet* oop_maps) {
435  // create nmethod
436  nmethod* nm = NULL;
437  {
438    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
439    int native_nmethod_size = allocation_size(code_buffer, sizeof(nmethod));
440    CodeOffsets offsets;
441    offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
442    offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
443    nm = new (native_nmethod_size)
444      nmethod(method(), native_nmethod_size, &offsets,
445              code_buffer, frame_size,
446              basic_lock_owner_sp_offset, basic_lock_sp_offset,
447              oop_maps);
448    NOT_PRODUCT(if (nm != NULL)  nmethod_stats.note_native_nmethod(nm));
449    if (PrintAssembly && nm != NULL)
450      Disassembler::decode(nm);
451  }
452  // verify nmethod
453  debug_only(if (nm) nm->verify();) // might block
454
455  if (nm != NULL) {
456    nm->log_new_nmethod();
457  }
458
459  return nm;
460}
461
462#ifdef HAVE_DTRACE_H
463nmethod* nmethod::new_dtrace_nmethod(methodHandle method,
464                                     CodeBuffer *code_buffer,
465                                     int vep_offset,
466                                     int trap_offset,
467                                     int frame_complete,
468                                     int frame_size) {
469  // create nmethod
470  nmethod* nm = NULL;
471  {
472    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
473    int nmethod_size = allocation_size(code_buffer, sizeof(nmethod));
474    CodeOffsets offsets;
475    offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
476    offsets.set_value(CodeOffsets::Dtrace_trap, trap_offset);
477    offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
478
479    nm = new (nmethod_size) nmethod(method(), nmethod_size, &offsets, code_buffer, frame_size);
480
481    NOT_PRODUCT(if (nm != NULL)  nmethod_stats.note_nmethod(nm));
482    if (PrintAssembly && nm != NULL)
483      Disassembler::decode(nm);
484  }
485  // verify nmethod
486  debug_only(if (nm) nm->verify();) // might block
487
488  if (nm != NULL) {
489    nm->log_new_nmethod();
490  }
491
492  return nm;
493}
494
495#endif // def HAVE_DTRACE_H
496
497nmethod* nmethod::new_nmethod(methodHandle method,
498  int compile_id,
499  int entry_bci,
500  CodeOffsets* offsets,
501  int orig_pc_offset,
502  DebugInformationRecorder* debug_info,
503  Dependencies* dependencies,
504  CodeBuffer* code_buffer, int frame_size,
505  OopMapSet* oop_maps,
506  ExceptionHandlerTable* handler_table,
507  ImplicitExceptionTable* nul_chk_table,
508  AbstractCompiler* compiler,
509  int comp_level
510)
511{
512  assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
513  // create nmethod
514  nmethod* nm = NULL;
515  { MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
516    int nmethod_size =
517      allocation_size(code_buffer, sizeof(nmethod))
518      + adjust_pcs_size(debug_info->pcs_size())
519      + round_to(dependencies->size_in_bytes() , oopSize)
520      + round_to(handler_table->size_in_bytes(), oopSize)
521      + round_to(nul_chk_table->size_in_bytes(), oopSize)
522      + round_to(debug_info->data_size()       , oopSize);
523    nm = new (nmethod_size)
524      nmethod(method(), nmethod_size, compile_id, entry_bci, offsets,
525              orig_pc_offset, debug_info, dependencies, code_buffer, frame_size,
526              oop_maps,
527              handler_table,
528              nul_chk_table,
529              compiler,
530              comp_level);
531    if (nm != NULL) {
532      // To make dependency checking during class loading fast, record
533      // the nmethod dependencies in the classes it is dependent on.
534      // This allows the dependency checking code to simply walk the
535      // class hierarchy above the loaded class, checking only nmethods
536      // which are dependent on those classes.  The slow way is to
537      // check every nmethod for dependencies which makes it linear in
538      // the number of methods compiled.  For applications with a lot
539      // classes the slow way is too slow.
540      for (Dependencies::DepStream deps(nm); deps.next(); ) {
541        klassOop klass = deps.context_type();
542        if (klass == NULL)  continue;  // ignore things like evol_method
543
544        // record this nmethod as dependent on this klass
545        instanceKlass::cast(klass)->add_dependent_nmethod(nm);
546      }
547    }
548    NOT_PRODUCT(if (nm != NULL)  nmethod_stats.note_nmethod(nm));
549    if (PrintAssembly && nm != NULL)
550      Disassembler::decode(nm);
551  }
552
553  // verify nmethod
554  debug_only(if (nm) nm->verify();) // might block
555
556  if (nm != NULL) {
557    nm->log_new_nmethod();
558  }
559
560  // done
561  return nm;
562}
563
564
565// For native wrappers
566nmethod::nmethod(
567  methodOop method,
568  int nmethod_size,
569  CodeOffsets* offsets,
570  CodeBuffer* code_buffer,
571  int frame_size,
572  ByteSize basic_lock_owner_sp_offset,
573  ByteSize basic_lock_sp_offset,
574  OopMapSet* oop_maps )
575  : CodeBlob("native nmethod", code_buffer, sizeof(nmethod),
576             nmethod_size, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps),
577  _compiled_synchronized_native_basic_lock_owner_sp_offset(basic_lock_owner_sp_offset),
578  _compiled_synchronized_native_basic_lock_sp_offset(basic_lock_sp_offset)
579{
580  {
581    debug_only(No_Safepoint_Verifier nsv;)
582    assert_locked_or_safepoint(CodeCache_lock);
583
584    NOT_PRODUCT(_has_debug_info = false; )
585    _method                  = method;
586    _entry_bci               = InvocationEntryBci;
587    _link                    = NULL;
588    _compiler                = NULL;
589    // We have no exception handler or deopt handler, so make the
590    // values something that will never match a pc, like the nmethod vtable entry.
591    _exception_offset        = 0;
592    _deoptimize_offset       = 0;
593    _orig_pc_offset          = 0;
594#ifdef HAVE_DTRACE_H
595    _trap_offset             = 0;
596#endif // def HAVE_DTRACE_H
597    _stub_offset             = data_offset();
598    _consts_offset           = data_offset();
599    _scopes_data_offset      = data_offset();
600    _scopes_pcs_offset       = _scopes_data_offset;
601    _dependencies_offset     = _scopes_pcs_offset;
602    _handler_table_offset    = _dependencies_offset;
603    _nul_chk_table_offset    = _handler_table_offset;
604    _nmethod_end_offset      = _nul_chk_table_offset;
605    _compile_id              = 0;  // default
606    _comp_level              = CompLevel_none;
607    _entry_point             = instructions_begin();
608    _verified_entry_point    = instructions_begin() + offsets->value(CodeOffsets::Verified_Entry);
609    _osr_entry_point         = NULL;
610    _exception_cache         = NULL;
611    _pc_desc_cache.reset_to(NULL);
612
613    flags.clear();
614    flags.state              = alive;
615    _markedForDeoptimization = 0;
616
617    _lock_count = 0;
618    _stack_traversal_mark    = 0;
619
620    code_buffer->copy_oops_to(this);
621    debug_only(check_store();)
622    CodeCache::commit(this);
623    VTune::create_nmethod(this);
624  }
625
626  if (PrintNativeNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) {
627    ttyLocker ttyl;  // keep the following output all in one block
628    // This output goes directly to the tty, not the compiler log.
629    // To enable tools to match it up with the compilation activity,
630    // be sure to tag this tty output with the compile ID.
631    if (xtty != NULL) {
632      xtty->begin_head("print_native_nmethod");
633      xtty->method(_method);
634      xtty->stamp();
635      xtty->end_head(" address='" INTPTR_FORMAT "'", (intptr_t) this);
636    }
637    // print the header part first
638    print();
639    // then print the requested information
640    if (PrintNativeNMethods) {
641      print_code();
642      oop_maps->print();
643    }
644    if (PrintRelocations) {
645      print_relocations();
646    }
647    if (xtty != NULL) {
648      xtty->tail("print_native_nmethod");
649    }
650  }
651  Events::log("Create nmethod " INTPTR_FORMAT, this);
652}
653
654// For dtrace wrappers
655#ifdef HAVE_DTRACE_H
656nmethod::nmethod(
657  methodOop method,
658  int nmethod_size,
659  CodeOffsets* offsets,
660  CodeBuffer* code_buffer,
661  int frame_size)
662  : CodeBlob("dtrace nmethod", code_buffer, sizeof(nmethod),
663             nmethod_size, offsets->value(CodeOffsets::Frame_Complete), frame_size, NULL),
664  _compiled_synchronized_native_basic_lock_owner_sp_offset(in_ByteSize(-1)),
665  _compiled_synchronized_native_basic_lock_sp_offset(in_ByteSize(-1))
666{
667  {
668    debug_only(No_Safepoint_Verifier nsv;)
669    assert_locked_or_safepoint(CodeCache_lock);
670
671    NOT_PRODUCT(_has_debug_info = false; )
672    _method                  = method;
673    _entry_bci               = InvocationEntryBci;
674    _link                    = NULL;
675    _compiler                = NULL;
676    // We have no exception handler or deopt handler, so make the
677    // values something that will never match a pc, like the nmethod vtable entry.
678    _exception_offset        = 0;
679    _deoptimize_offset       = 0;
680    _trap_offset             = offsets->value(CodeOffsets::Dtrace_trap);
681    _orig_pc_offset          = 0;
682    _stub_offset             = data_offset();
683    _consts_offset           = data_offset();
684    _scopes_data_offset      = data_offset();
685    _scopes_pcs_offset       = _scopes_data_offset;
686    _dependencies_offset     = _scopes_pcs_offset;
687    _handler_table_offset    = _dependencies_offset;
688    _nul_chk_table_offset    = _handler_table_offset;
689    _nmethod_end_offset      = _nul_chk_table_offset;
690    _compile_id              = 0;  // default
691    _comp_level              = CompLevel_none;
692    _entry_point             = instructions_begin();
693    _verified_entry_point    = instructions_begin() + offsets->value(CodeOffsets::Verified_Entry);
694    _osr_entry_point         = NULL;
695    _exception_cache         = NULL;
696    _pc_desc_cache.reset_to(NULL);
697
698    flags.clear();
699    flags.state              = alive;
700    _markedForDeoptimization = 0;
701
702    _lock_count = 0;
703    _stack_traversal_mark    = 0;
704
705    code_buffer->copy_oops_to(this);
706    debug_only(check_store();)
707    CodeCache::commit(this);
708    VTune::create_nmethod(this);
709  }
710
711  if (PrintNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) {
712    ttyLocker ttyl;  // keep the following output all in one block
713    // This output goes directly to the tty, not the compiler log.
714    // To enable tools to match it up with the compilation activity,
715    // be sure to tag this tty output with the compile ID.
716    if (xtty != NULL) {
717      xtty->begin_head("print_dtrace_nmethod");
718      xtty->method(_method);
719      xtty->stamp();
720      xtty->end_head(" address='" INTPTR_FORMAT "'", (intptr_t) this);
721    }
722    // print the header part first
723    print();
724    // then print the requested information
725    if (PrintNMethods) {
726      print_code();
727    }
728    if (PrintRelocations) {
729      print_relocations();
730    }
731    if (xtty != NULL) {
732      xtty->tail("print_dtrace_nmethod");
733    }
734  }
735  Events::log("Create nmethod " INTPTR_FORMAT, this);
736}
737#endif // def HAVE_DTRACE_H
738
739void* nmethod::operator new(size_t size, int nmethod_size) {
740  // Always leave some room in the CodeCache for I2C/C2I adapters
741  if (CodeCache::unallocated_capacity() < CodeCacheMinimumFreeSpace) return NULL;
742  return CodeCache::allocate(nmethod_size);
743}
744
745
746nmethod::nmethod(
747  methodOop method,
748  int nmethod_size,
749  int compile_id,
750  int entry_bci,
751  CodeOffsets* offsets,
752  int orig_pc_offset,
753  DebugInformationRecorder* debug_info,
754  Dependencies* dependencies,
755  CodeBuffer *code_buffer,
756  int frame_size,
757  OopMapSet* oop_maps,
758  ExceptionHandlerTable* handler_table,
759  ImplicitExceptionTable* nul_chk_table,
760  AbstractCompiler* compiler,
761  int comp_level
762  )
763  : CodeBlob("nmethod", code_buffer, sizeof(nmethod),
764             nmethod_size, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps),
765  _compiled_synchronized_native_basic_lock_owner_sp_offset(in_ByteSize(-1)),
766  _compiled_synchronized_native_basic_lock_sp_offset(in_ByteSize(-1))
767{
768  assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
769  {
770    debug_only(No_Safepoint_Verifier nsv;)
771    assert_locked_or_safepoint(CodeCache_lock);
772
773    NOT_PRODUCT(_has_debug_info = false; )
774    _method                  = method;
775    _compile_id              = compile_id;
776    _comp_level              = comp_level;
777    _entry_bci               = entry_bci;
778    _link                    = NULL;
779    _compiler                = compiler;
780    _orig_pc_offset          = orig_pc_offset;
781#ifdef HAVE_DTRACE_H
782    _trap_offset             = 0;
783#endif // def HAVE_DTRACE_H
784    _stub_offset             = instructions_offset() + code_buffer->total_offset_of(code_buffer->stubs()->start());
785
786    // Exception handler and deopt handler are in the stub section
787    _exception_offset        = _stub_offset + offsets->value(CodeOffsets::Exceptions);
788    _deoptimize_offset       = _stub_offset + offsets->value(CodeOffsets::Deopt);
789    _consts_offset           = instructions_offset() + code_buffer->total_offset_of(code_buffer->consts()->start());
790    _scopes_data_offset      = data_offset();
791    _scopes_pcs_offset       = _scopes_data_offset   + round_to(debug_info->data_size         (), oopSize);
792    _dependencies_offset     = _scopes_pcs_offset    + adjust_pcs_size(debug_info->pcs_size());
793    _handler_table_offset    = _dependencies_offset  + round_to(dependencies->size_in_bytes (), oopSize);
794    _nul_chk_table_offset    = _handler_table_offset + round_to(handler_table->size_in_bytes(), oopSize);
795    _nmethod_end_offset      = _nul_chk_table_offset + round_to(nul_chk_table->size_in_bytes(), oopSize);
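    // The offsets above describe a contiguous data section laid out in the
    // order: scopes data, scopes pcs, dependencies, handler table, nul chk
    // table, with each boundary rounded to oopSize (the PcDesc block is
    // adjusted via adjust_pcs_size instead).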
796
797    _entry_point             = instructions_begin();
798    _verified_entry_point    = instructions_begin() + offsets->value(CodeOffsets::Verified_Entry);
799    _osr_entry_point         = instructions_begin() + offsets->value(CodeOffsets::OSR_Entry);
800    _exception_cache         = NULL;
801    _pc_desc_cache.reset_to(scopes_pcs_begin());
802
803    flags.clear();
804    flags.state              = alive;
805    _markedForDeoptimization = 0;
806
807    _unload_reported         = false;           // jvmti state
808
809    _lock_count = 0;
810    _stack_traversal_mark    = 0;
811
812    // Copy contents of ScopeDescRecorder to nmethod
813    code_buffer->copy_oops_to(this);
814    debug_info->copy_to(this);
815    dependencies->copy_to(this);
816    debug_only(check_store();)
817
818    CodeCache::commit(this);
819
820    VTune::create_nmethod(this);
821
822    // Copy contents of ExceptionHandlerTable to nmethod
823    handler_table->copy_to(this);
824    nul_chk_table->copy_to(this);
825
826    // We use the entry point information to find out whether a method is
827    // static or non-static.
828    assert(compiler->is_c2() ||
829           _method->is_static() == (entry_point() == _verified_entry_point),
830           "entry points must be the same for static methods and vice versa");
831  }
832
833  bool printnmethods = PrintNMethods
834    || CompilerOracle::should_print(_method)
835    || CompilerOracle::has_option_string(_method, "PrintNMethods");
836  if (printnmethods || PrintDebugInfo || PrintRelocations || PrintDependencies || PrintExceptionHandlers) {
837    print_nmethod(printnmethods);
838  }
839
840  // Note: Do not verify in here as the CodeCache_lock is
841  //       taken, which would conflict with the CompiledIC_lock,
842  //       which is taken during the verification of call sites.
843  //       (was bug - gri 10/25/99)
844
845  Events::log("Create nmethod " INTPTR_FORMAT, this);
846}
847
848
849// Print a short set of xml attributes to identify this nmethod.  The
850// output should be embedded in some other element.
851void nmethod::log_identity(xmlStream* log) const {
852  log->print(" compile_id='%d'", compile_id());
853  const char* nm_kind = compile_kind();
854  if (nm_kind != NULL)  log->print(" compile_kind='%s'", nm_kind);
855  if (compiler() != NULL) {
856    log->print(" compiler='%s'", compiler()->name());
857  }
858#ifdef TIERED
859  log->print(" level='%d'", comp_level());
860#endif // TIERED
861}
862
863
864#define LOG_OFFSET(log, name)                    \
865  if ((intptr_t)name##_end() - (intptr_t)name##_begin()) \
866    log->print(" " XSTR(name) "_offset='%d'"    , \
867               (intptr_t)name##_begin() - (intptr_t)this)
868
869
870void nmethod::log_new_nmethod() const {
871  if (LogCompilation && xtty != NULL) {
872    ttyLocker ttyl;
873    HandleMark hm;
874    xtty->begin_elem("nmethod");
875    log_identity(xtty);
876    xtty->print(" entry='" INTPTR_FORMAT "' size='%d'",
877                instructions_begin(), size());
878    xtty->print(" address='" INTPTR_FORMAT "'", (intptr_t) this);
879
880    LOG_OFFSET(xtty, relocation);
881    LOG_OFFSET(xtty, code);
882    LOG_OFFSET(xtty, stub);
883    LOG_OFFSET(xtty, consts);
884    LOG_OFFSET(xtty, scopes_data);
885    LOG_OFFSET(xtty, scopes_pcs);
886    LOG_OFFSET(xtty, dependencies);
887    LOG_OFFSET(xtty, handler_table);
888    LOG_OFFSET(xtty, nul_chk_table);
889    LOG_OFFSET(xtty, oops);
890
891    xtty->method(method());
892    xtty->stamp();
893    xtty->end_elem();
894  }
895}
896
897#undef LOG_OFFSET
898
899
900// Print out more verbose output, usually for a newly created nmethod.
901void nmethod::print_on(outputStream* st, const char* title) const {
902  if (st != NULL) {
903    ttyLocker ttyl;
904    // Print a little tag line that looks like +PrintCompilation output:
905    st->print("%3d%c  %s",
906              compile_id(),
907              is_osr_method() ? '%' :
908              method() != NULL &&
909              is_native_method() ? 'n' : ' ',
910              title);
911#ifdef TIERED
912    st->print(" (%d) ", comp_level());
913#endif // TIERED
914    if (WizardMode) st->print(" (" INTPTR_FORMAT ")", this);
915    if (method() != NULL) {
916      method()->print_short_name(st);
917      if (is_osr_method())
918        st->print(" @ %d", osr_entry_bci());
919      if (method()->code_size() > 0)
920        st->print(" (%d bytes)", method()->code_size());
921    }
922  }
923}
924
925
926void nmethod::print_nmethod(bool printmethod) {
927  ttyLocker ttyl;  // keep the following output all in one block
928  if (xtty != NULL) {
929    xtty->begin_head("print_nmethod");
930    xtty->stamp();
931    xtty->end_head();
932  }
933  // print the header part first
934  print();
935  // then print the requested information
936  if (printmethod) {
937    print_code();
938    print_pcs();
939    oop_maps()->print();
940  }
941  if (PrintDebugInfo) {
942    print_scopes();
943  }
944  if (PrintRelocations) {
945    print_relocations();
946  }
947  if (PrintDependencies) {
948    print_dependencies();
949  }
950  if (PrintExceptionHandlers) {
951    print_handler_table();
952    print_nul_chk_table();
953  }
954  if (xtty != NULL) {
955    xtty->tail("print_nmethod");
956  }
957}
958
959
960void nmethod::set_version(int v) {
961  flags.version = v;
962}
963
964
965ScopeDesc* nmethod::scope_desc_at(address pc) {
966  PcDesc* pd = pc_desc_at(pc);
967  guarantee(pd != NULL, "scope must be present");
968  return new ScopeDesc(this, pd->scope_decode_offset(),
969                       pd->obj_decode_offset());
970}
971
972
973void nmethod::clear_inline_caches() {
974  assert(SafepointSynchronize::is_at_safepoint(), "cleaning of IC's only allowed at safepoint");
975  if (is_zombie()) {
976    return;
977  }
978
979  RelocIterator iter(this);
980  while (iter.next()) {
981    iter.reloc()->clear_inline_cache();
982  }
983}
984
985
986void nmethod::cleanup_inline_caches() {
987
988  assert(SafepointSynchronize::is_at_safepoint() &&
989        !CompiledIC_lock->is_locked() &&
990        !Patching_lock->is_locked(), "no threads must be updating the inline caches by themselves");
991
992  // If the method is not entrant or zombie then a JMP is plastered over the
993  // first few bytes.  If an oop in the old code was there, that oop
994  // should not get GC'd.  Skip the first few bytes of oops on
995  // not-entrant methods.
996  address low_boundary = verified_entry_point();
997  if (!is_in_use()) {
998    low_boundary += NativeJump::instruction_size;
999    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
1000    // This means that the low_boundary is going to be a little too high.
1001    // This shouldn't matter, since oops of non-entrant methods are never used.
1002    // In fact, why are we bothering to look at oops in a non-entrant method??
1003  }
1004
1005  // Find all calls in an nmethod, and clear the ones that point to zombie methods
1006  ResourceMark rm;
1007  RelocIterator iter(this, low_boundary);
1008  while(iter.next()) {
1009    switch(iter.type()) {
1010      case relocInfo::virtual_call_type:
1011      case relocInfo::opt_virtual_call_type: {
1012        CompiledIC *ic = CompiledIC_at(iter.reloc());
1013        // OK to look up references to zombies here
1014        CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
1015        if( cb != NULL && cb->is_nmethod() ) {
1016          nmethod* nm = (nmethod*)cb;
1017          // Clean inline caches pointing to both zombie and not_entrant methods
1018          if (!nm->is_in_use()) ic->set_to_clean();
1019        }
1020        break;
1021      }
1022      case relocInfo::static_call_type: {
1023        CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
1024        CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());
1025        if( cb != NULL && cb->is_nmethod() ) {
1026          nmethod* nm = (nmethod*)cb;
1027          // Clean inline caches pointing to both zombie and not_entrant methods
1028          if (!nm->is_in_use()) csc->set_to_clean();
1029        }
1030        break;
1031      }
1032    }
1033  }
1034}
1035
1036void nmethod::mark_as_seen_on_stack() {
1037  assert(is_not_entrant(), "must be a non-entrant method");
1038  set_stack_traversal_mark(NMethodSweeper::traversal_count());
1039}
1040
1041// Tell if a non-entrant method can be converted to a zombie (i.e., there are no activations on the stack)
1042bool nmethod::can_not_entrant_be_converted() {
1043  assert(is_not_entrant(), "must be a non-entrant method");
1044  assert(SafepointSynchronize::is_at_safepoint(), "must be called during a safepoint");
1045
1046  // Since the nmethod sweeper only does partial sweeps, the sweeper's traversal
1047  // count can be greater than the stack traversal count before it hits the
1048  // nmethod for the second time.
1049  return stack_traversal_mark()+1 < NMethodSweeper::traversal_count();
1050}
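// For example (illustrative numbers): if this nmethod was last seen on a stack
// during sweeper traversal N (stack_traversal_mark() == N), the check above only
// answers true once NMethodSweeper::traversal_count() has reached N + 2, i.e.
// at least one full traversal has completed without the nmethod being seen on
// any thread's stack.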
1051
1052void nmethod::inc_decompile_count() {
1053  // Could be gated by ProfileTraps, but do not bother...
1054  methodOop m = method();
1055  if (m == NULL)  return;
1056  methodDataOop mdo = m->method_data();
1057  if (mdo == NULL)  return;
1058  // There is a benign race here.  See comments in methodDataOop.hpp.
1059  mdo->inc_decompile_count();
1060}
1061
1062void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {
1063
1064  post_compiled_method_unload();
1065
1066  // Since this nmethod is being unloaded, make sure that dependencies
1067  // recorded in instanceKlasses get flushed and pass non-NULL closure to
1068  // indicate that this work is being done during a GC.
1069  assert(Universe::heap()->is_gc_active(), "should only be called during gc");
1070  assert(is_alive != NULL, "Should be non-NULL");
1071  // A non-NULL is_alive closure indicates that this is being called during GC.
1072  flush_dependencies(is_alive);
1073
1074  // Break cycle between nmethod & method
1075  if (TraceClassUnloading && WizardMode) {
1076    tty->print_cr("[Class unloading: Making nmethod " INTPTR_FORMAT
1077                  " unloadable], methodOop(" INTPTR_FORMAT
1078                  "), cause(" INTPTR_FORMAT ")",
1079                  this, (address)_method, (address)cause);
1080    cause->klass()->print();
1081  }
1082  // If _method is already NULL the methodOop is about to be unloaded,
1083  // so we don't have to break the cycle. Note that it is possible to
1084  // have the methodOop live here, in case we unload the nmethod because
1085  // it is pointing to some oop (other than the methodOop) being unloaded.
1086  if (_method != NULL) {
1087    // OSR methods point to the methodOop, but the methodOop does not
1088    // point back!
1089    if (_method->code() == this) {
1090      _method->clear_code(); // Break a cycle
1091    }
1092    inc_decompile_count();     // Last chance to make a mark on the MDO
1093    _method = NULL;            // Clear the method of this dead nmethod
1094  }
1095  // Make the nmethod unloaded - i.e., change state and notify sweeper
1096  check_safepoint();
1097  if (is_in_use()) {
1098    // Transitioning directly from live to unloaded -- so
1099    // we need to force a cache clean-up; remember this
1100    // for later on.
1101    CodeCache::set_needs_cache_clean(true);
1102  }
1103  flags.state = unloaded;
1104
1105  // The methodOop is gone at this point
1106  assert(_method == NULL, "Tautology");
1107
1108  set_link(NULL);
1109  NMethodSweeper::notify(this);
1110}
1111
1112void nmethod::invalidate_osr_method() {
1113  assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod");
1114  if (_entry_bci != InvalidOSREntryBci)
1115    inc_decompile_count();
1116  // Remove from list of active nmethods
1117  if (method() != NULL)
1118    instanceKlass::cast(method()->method_holder())->remove_osr_nmethod(this);
1119  // Set entry as invalid
1120  _entry_bci = InvalidOSREntryBci;
1121}
1122
1123void nmethod::log_state_change(int state) const {
1124  if (LogCompilation) {
1125    if (xtty != NULL) {
1126      ttyLocker ttyl;  // keep the following output all in one block
1127      xtty->begin_elem("make_not_entrant %sthread='" UINTX_FORMAT "'",
1128                       (state == zombie ? "zombie='1' " : ""),
1129                       os::current_thread_id());
1130      log_identity(xtty);
1131      xtty->stamp();
1132      xtty->end_elem();
1133    }
1134  }
1135  if (PrintCompilation) {
1136    print_on(tty, state == zombie ? "made zombie " : "made not entrant ");
1137    tty->cr();
1138  }
1139}
1140
1141// Common functionality for both make_not_entrant and make_zombie
1142void nmethod::make_not_entrant_or_zombie(int state) {
1143  assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");
1144
1145  // Code for an on-stack-replacement nmethod is removed when a class gets unloaded.
1146  // OSR nmethods never become zombie/non-entrant, so the nmethod sweeper will never
1147  // remove them. Instead the entry_bci is set to InvalidOSREntryBci, so the osr nmethod
1148  // will never be used anymore. The fact that these nmethods are only removed when class
1149  // unloading happens makes life much simpler, since they are not just going to disappear
1150  // out of the blue.
1151  if (is_osr_only_method()) {
1152    if (osr_entry_bci() != InvalidOSREntryBci) {
1153      // only log this once
1154      log_state_change(state);
1155    }
1156    invalidate_osr_method();
1157    return;
1158  }
1159
1160  // If the method is already zombie or set to the state we want, nothing to do
1161  if (is_zombie() || (state == not_entrant && is_not_entrant())) {
1162    return;
1163  }
1164
1165  log_state_change(state);
1166
1167  // Make sure the nmethod is not flushed in case of a safepoint in code below.
1168  nmethodLocker nml(this);
1169
1170  {
1171    // Enter critical section.  Does not block for safepoint.
1172    MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
1173    // The caller can be calling the method statically or through an inline
1174    // cache call.
1175    if (!is_not_entrant()) {
1176      NativeJump::patch_verified_entry(entry_point(), verified_entry_point(),
1177                  SharedRuntime::get_handle_wrong_method_stub());
1178      assert (NativeJump::instruction_size == nmethod::_zombie_instruction_size, "");
1179    }
1180
1181    // When the nmethod becomes zombie it is no longer alive so the
1182    // dependencies must be flushed.  nmethods in the not_entrant
1183    // state will be flushed later when the transition to zombie
1184    // happens or they get unloaded.
1185    if (state == zombie) {
1186      assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");
1187      flush_dependencies(NULL);
1188    } else {
1189      assert(state == not_entrant, "other cases may need to be handled differently");
1190    }
1191
1192    // Change state
1193    flags.state = state;
1194  } // leave critical region under Patching_lock
1195
1196  if (state == not_entrant) {
1197    Events::log("Make nmethod not entrant " INTPTR_FORMAT, this);
1198  } else {
1199    Events::log("Make nmethod zombie " INTPTR_FORMAT, this);
1200  }
1201
1202  if (TraceCreateZombies) {
1203    tty->print_cr("nmethod <" INTPTR_FORMAT "> code made %s", this, (state == not_entrant) ? "not entrant" : "zombie");
1204  }
1205
1206  // Make sweeper aware that there is a zombie method that needs to be removed
1207  NMethodSweeper::notify(this);
1208
1209  // not_entrant only stuff
1210  if (state == not_entrant) {
1211    mark_as_seen_on_stack();
1212  }
1213
1214  // It's a true state change, so mark the method as decompiled.
1215  inc_decompile_count();
1216
1217
1218  // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload event
1219  // and it hasn't already been reported for this nmethod then report it now.
1220  // (the event may have been reported earlier if the GC marked it for unloading).
1221  if (state == zombie) {
1222
1223    DTRACE_METHOD_UNLOAD_PROBE(method());
1224
1225    if (JvmtiExport::should_post_compiled_method_unload() &&
1226        !unload_reported()) {
1227      assert(method() != NULL, "checking");
1228      {
1229        HandleMark hm;
1230        JvmtiExport::post_compiled_method_unload_at_safepoint(
1231            method()->jmethod_id(), code_begin());
1232      }
1233      set_unload_reported();
1234    }
1235  }
1236
1237
1238  // Zombie only stuff
1239  if (state == zombie) {
1240    VTune::delete_nmethod(this);
1241  }
1242
1243  // Check whether method got unloaded at a safepoint before this,
1244  // if so we can skip the flushing steps below
1245  if (method() == NULL) return;
1246
1247  // Remove nmethod from method.
1248  // We need to check if both the _code and _from_compiled_code_entry_point
1249  // refer to this nmethod because there is a race in setting these two fields
1250  // in methodOop as seen in bugid 4947125.
1251  // If the vep() points to the zombie nmethod, the memory for the nmethod
1252  // could be flushed and the compiler and vtable stubs could still call
1253  // through it.
1254  if (method()->code() == this ||
1255      method()->from_compiled_entry() == verified_entry_point()) {
1256    HandleMark hm;
1257    method()->clear_code();
1258  }
1259}
1260
1261
1262#ifndef PRODUCT
1263void nmethod::check_safepoint() {
1264  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1265}
1266#endif
1267
1268
1269void nmethod::flush() {
1270  // Note that there are no valid oops in the nmethod anymore.
1271  assert(is_zombie() || (is_osr_method() && is_unloaded()), "must be a zombie method");
1272  assert(is_marked_for_reclamation() || (is_osr_method() && is_unloaded()), "must be marked for reclamation");
1273
1274  assert (!is_locked_by_vm(), "locked methods shouldn't be flushed");
1275  check_safepoint();
1276
1277  // completely deallocate this method
1278  EventMark m("flushing nmethod " INTPTR_FORMAT " %s", this, "");
1279  if (PrintMethodFlushing) {
1280    tty->print_cr("*flushing nmethod " INTPTR_FORMAT ". Live blobs: %d", this, CodeCache::nof_blobs());
1281  }
1282
1283  // We need to deallocate any ExceptionCache data.
1284  // Note that we do not need to grab the nmethod lock for this, it
1285  // better be thread safe if we're disposing of it!
1286  ExceptionCache* ec = exception_cache();
1287  set_exception_cache(NULL);
1288  while(ec != NULL) {
1289    ExceptionCache* next = ec->next();
1290    delete ec;
1291    ec = next;
1292  }
1293
1294  ((CodeBlob*)(this))->flush();
1295
1296  CodeCache::free(this);
1297}
1298
1299
1300//
1301// Notify all classes this nmethod is dependent on that it is no
1302// longer dependent. This should only be called in two situations.
1303// First, when an nmethod transitions to a zombie all dependents need
1304// to be cleared.  Since zombification happens at a safepoint there are no
1305// synchronization issues.  The second place is a little more tricky.
1306// During phase 1 of mark sweep class unloading may happen and as a
1307// result some nmethods may get unloaded.  In this case the flushing
1308// of dependencies must happen during phase 1 since after GC any
1309// dependencies in the unloaded nmethod won't be updated, so
1310// traversing the dependency information is unsafe.  In that case this
1311// function is called with a non-NULL argument and this function only
1312// notifies instanceKlasses that are reachable.
1313
1314void nmethod::flush_dependencies(BoolObjectClosure* is_alive) {
1315  assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");
1316  assert(Universe::heap()->is_gc_active() == (is_alive != NULL),
1317  "is_alive is non-NULL if and only if we are called during GC");
1318  if (!has_flushed_dependencies()) {
1319    set_has_flushed_dependencies();
1320    for (Dependencies::DepStream deps(this); deps.next(); ) {
1321      klassOop klass = deps.context_type();
1322      if (klass == NULL)  continue;  // ignore things like evol_method
1323
1324      // During GC the is_alive closure is non-NULL, and is used to
1325      // determine liveness of dependees that need to be updated.
1326      if (is_alive == NULL || is_alive->do_object_b(klass)) {
1327        instanceKlass::cast(klass)->remove_dependent_nmethod(this);
1328      }
1329    }
1330  }
1331}
1332
1333
1334// If this oop is not live, the nmethod can be unloaded.
1335bool nmethod::can_unload(BoolObjectClosure* is_alive,
1336                         OopClosure* keep_alive,
1337                         oop* root, bool unloading_occurred) {
1338  assert(root != NULL, "just checking");
1339  oop obj = *root;
1340  if (obj == NULL || is_alive->do_object_b(obj)) {
1341      return false;
1342  }
1343  if (obj->is_compiledICHolder()) {
1344    compiledICHolderOop cichk_oop = compiledICHolderOop(obj);
1345    if (is_alive->do_object_b(
1346          cichk_oop->holder_method()->method_holder()) &&
1347        is_alive->do_object_b(cichk_oop->holder_klass())) {
1348      // The oop should be kept alive
1349      keep_alive->do_oop(root);
1350      return false;
1351    }
1352  }
1353  if (!UseParallelOldGC || !VerifyParallelOldWithMarkSweep) {
1354    // Cannot do this test if verification of the UseParallelOldGC
1355    // code using the PSMarkSweep code is being done.
1356    assert(unloading_occurred, "Inconsistency in unloading");
1357  }
1358  make_unloaded(is_alive, obj);
1359  return true;
1360}
1361
1362// ------------------------------------------------------------------
1363// post_compiled_method_load_event
1364// new method for install_code() path
1365// Transfer information from compilation to jvmti
1366void nmethod::post_compiled_method_load_event() {
1367
1368  methodOop moop = method();
1369  HS_DTRACE_PROBE8(hotspot, compiled__method__load,
1370      moop->klass_name()->bytes(),
1371      moop->klass_name()->utf8_length(),
1372      moop->name()->bytes(),
1373      moop->name()->utf8_length(),
1374      moop->signature()->bytes(),
1375      moop->signature()->utf8_length(),
1376      code_begin(), code_size());
1377
1378  if (JvmtiExport::should_post_compiled_method_load()) {
1379    JvmtiExport::post_compiled_method_load(this);
1380  }
1381}
1382
1383void nmethod::post_compiled_method_unload() {
1384  assert(_method != NULL && !is_unloaded(), "just checking");
1385  DTRACE_METHOD_UNLOAD_PROBE(method());
1386
1387  // If a JVMTI agent has enabled the CompiledMethodUnload event then
1388  // post the event. Sometime later this nmethod will be made a zombie by
1389  // the sweeper but the methodOop will not be valid at that point.
1390  if (JvmtiExport::should_post_compiled_method_unload()) {
1391    assert(!unload_reported(), "already unloaded");
1392    HandleMark hm;
1393    JvmtiExport::post_compiled_method_unload_at_safepoint(
1394                      method()->jmethod_id(), code_begin());
1395  }
1396
1397  // The JVMTI CompiledMethodUnload event can be enabled or disabled at
1398  // any time. As the nmethod is being unloaded now we mark it as
1399  // having the unload event reported - this will ensure that we don't
1400  // attempt to report the event in the unlikely scenario where the
1401  // event is enabled at the time the nmethod is made a zombie.
1402  set_unload_reported();
1403}
1404
1405// This is called at the end of the strong tracing/marking phase of a
1406// GC to unload an nmethod if it contains otherwise unreachable
1407// oops.
1408
1409void nmethod::do_unloading(BoolObjectClosure* is_alive,
1410                           OopClosure* keep_alive, bool unloading_occurred) {
1411  // Make sure the oops are ready to receive visitors
1412  assert(!is_zombie() && !is_unloaded(),
1413         "should not call follow on zombie or unloaded nmethod");
1414
1415  // If the method is not entrant then a JMP is plastered over the
1416  // first few bytes.  If an oop in the old code was there, that oop
1417  // should not get GC'd.  Skip the first few bytes of oops on
1418  // not-entrant methods.
1419  address low_boundary = verified_entry_point();
1420  if (is_not_entrant()) {
1421    low_boundary += NativeJump::instruction_size;
1422    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
1423    // (See comment above.)
1424  }
1425
1426  // The RedefineClasses() API can cause the class unloading invariant
1427  // to no longer be true. See jvmtiExport.hpp for details.
1428  // Also, leave a debugging breadcrumb in local flag.
1429  bool a_class_was_redefined = JvmtiExport::has_redefined_a_class();
1430  if (a_class_was_redefined) {
1431    // This set of the unloading_occurred flag is done before the
1432    // call to post_compiled_method_unload() so that the unloading
1433    // of this nmethod is reported.
1434    unloading_occurred = true;
1435  }
1436
1437  // Follow methodOop
1438  if (can_unload(is_alive, keep_alive, (oop*)&_method, unloading_occurred)) {
1439    return;
1440  }
1441
1442  // Exception cache
1443  ExceptionCache* ec = exception_cache();
1444  while (ec != NULL) {
1445    oop* ex_addr = (oop*)ec->exception_type_addr();
1446    oop ex = *ex_addr;
1447    ExceptionCache* next_ec = ec->next();
1448    if (ex != NULL && !is_alive->do_object_b(ex)) {
1449      assert(!ex->is_compiledICHolder(), "Possible error here");
1450      remove_from_exception_cache(ec);
1451    }
1452    ec = next_ec;
1453  }
1454
1455  // If class unloading occurred we first iterate over all inline caches and
1456  // clear ICs where the cached oop is referring to an unloaded klass or method.
1457  // The remaining live cached oops will be traversed in the relocInfo::oop_type
1458  // iteration below.
1459  if (unloading_occurred) {
1460    RelocIterator iter(this, low_boundary);
1461    while(iter.next()) {
1462      if (iter.type() == relocInfo::virtual_call_type) {
1463        CompiledIC *ic = CompiledIC_at(iter.reloc());
1464        oop ic_oop = ic->cached_oop();
1465        if (ic_oop != NULL && !is_alive->do_object_b(ic_oop)) {
1466          // The only exception is compiledICHolder oops which may
1467          // yet be marked below. (We check this further below).
1468          if (ic_oop->is_compiledICHolder()) {
1469            compiledICHolderOop cichk_oop = compiledICHolderOop(ic_oop);
1470            if (is_alive->do_object_b(
1471                  cichk_oop->holder_method()->method_holder()) &&
1472                is_alive->do_object_b(cichk_oop->holder_klass())) {
1473              continue;
1474            }
1475          }
1476          ic->set_to_clean();
1477          assert(ic->cached_oop() == NULL, "cached oop in IC should be cleared");
1478        }
1479      }
1480    }
1481  }
1482
1483  // Compiled code
1484  RelocIterator iter(this, low_boundary);
1485  while (iter.next()) {
1486    if (iter.type() == relocInfo::oop_type) {
1487      oop_Relocation* r = iter.oop_reloc();
1488      // In this loop, we must only traverse those oops directly embedded in
1489      // the code.  Other oops (oop_index>0) are seen as part of scopes_oops.
1490      assert(1 == (r->oop_is_immediate()) +
1491                  (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
1492             "oop must be found in exactly one place");
1493      if (r->oop_is_immediate() && r->oop_value() != NULL) {
1494        if (can_unload(is_alive, keep_alive, r->oop_addr(), unloading_occurred)) {
1495          return;
1496        }
1497      }
1498    }
1499  }
1500
1501
1502  // Scopes
1503  for (oop* p = oops_begin(); p < oops_end(); p++) {
1504    if (*p == Universe::non_oop_word())  continue;  // skip non-oops
1505    if (can_unload(is_alive, keep_alive, p, unloading_occurred)) {
1506      return;
1507    }
1508  }
1509
1510#ifndef PRODUCT
1511  // This nmethod was not unloaded; check below that all CompiledICs
1512  // refer to marked oops.
1513  {
1514    RelocIterator iter(this, low_boundary);
1515    while (iter.next()) {
1516      if (iter.type() == relocInfo::virtual_call_type) {
1517         CompiledIC *ic = CompiledIC_at(iter.reloc());
1518         oop ic_oop = ic->cached_oop();
1519         assert(ic_oop == NULL || is_alive->do_object_b(ic_oop),
1520                "Found unmarked ic_oop in reachable nmethod");
1521       }
1522    }
1523  }
1524#endif // !PRODUCT
1525}
1526
1527void nmethod::oops_do(OopClosure* f) {
1528  // make sure the oops are ready to receive visitors
1529  assert(!is_zombie() && !is_unloaded(),
1530         "should not call follow on zombie or unloaded nmethod");
1531
1532  // If the method is not entrant or zombie then a JMP is plastered over the
1533  // first few bytes.  If an oop in the old code was there, that oop
1534  // should not get GC'd.  Skip the first few bytes of oops on
1535  // not-entrant methods.
1536  address low_boundary = verified_entry_point();
1537  if (is_not_entrant()) {
1538    low_boundary += NativeJump::instruction_size;
1539    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
1540    // (See comment above.)
1541  }
1542
1543  // Compiled code
1544  f->do_oop((oop*) &_method);
1545  ExceptionCache* ec = exception_cache();
1546  while(ec != NULL) {
1547    f->do_oop((oop*)ec->exception_type_addr());
1548    ec = ec->next();
1549  }
1550
1551  RelocIterator iter(this, low_boundary);
1552  while (iter.next()) {
1553    if (iter.type() == relocInfo::oop_type ) {
1554      oop_Relocation* r = iter.oop_reloc();
1555      // In this loop, we must only follow those oops directly embedded in
1556      // the code.  Other oops (oop_index>0) are seen as part of scopes_oops.
1557      assert(1 == (r->oop_is_immediate()) + (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()), "oop must be found in exactly one place");
1558      if (r->oop_is_immediate() && r->oop_value() != NULL) {
1559        f->do_oop(r->oop_addr());
1560      }
1561    }
1562  }
1563
1564  // Scopes
1565  for (oop* p = oops_begin(); p < oops_end(); p++) {
1566    if (*p == Universe::non_oop_word())  continue;  // skip non-oops
1567    f->do_oop(p);
1568  }
1569}
1570
1571// Method that knows how to preserve outgoing arguments at call. This method must be
1572// called with a frame corresponding to a Java invoke
1573void nmethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
1574  if (!method()->is_native()) {
1575    SimpleScopeDesc ssd(this, fr.pc());
1576    Bytecode_invoke* call = Bytecode_invoke_at(ssd.method(), ssd.bci());
1577    bool is_static = call->is_invokestatic();
1578    symbolOop signature = call->signature();
1579    fr.oops_compiled_arguments_do(signature, is_static, reg_map, f);
1580  }
1581}
1582
1583
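// Return the embedded oop recorded by an oop-type relocation at address p,
// or NULL if no such relocation exists there.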
1584oop nmethod::embeddedOop_at(u_char* p) {
1585  RelocIterator iter(this, p, p + oopSize);
1586  while (iter.next())
1587    if (iter.type() == relocInfo::oop_type) {
1588      return iter.oop_reloc()->oop_value();
1589    }
1590  return NULL;
1591}
1592
1593
1594inline bool includes(void* p, void* from, void* to) {
1595  return from <= p && p < to;
1596}
1597
1598
1599void nmethod::copy_scopes_pcs(PcDesc* pcs, int count) {
1600  assert(count >= 2, "must be sentinel values, at least");
1601
1602#ifdef ASSERT
1603  // must be sorted and unique; we do a binary search in find_pc_desc()
1604  int prev_offset = pcs[0].pc_offset();
1605  assert(prev_offset == PcDesc::lower_offset_limit,
1606         "must start with a sentinel");
1607  for (int i = 1; i < count; i++) {
1608    int this_offset = pcs[i].pc_offset();
1609    assert(this_offset > prev_offset, "offsets must be sorted");
1610    prev_offset = this_offset;
1611  }
1612  assert(prev_offset == PcDesc::upper_offset_limit,
1613         "must end with a sentinel");
1614#endif //ASSERT
1615
1616  int size = count * sizeof(PcDesc);
1617  assert(scopes_pcs_size() >= size, "oob");
1618  memcpy(scopes_pcs_begin(), pcs, size);
1619
1620  // Adjust the final sentinel downward.
1621  PcDesc* last_pc = &scopes_pcs_begin()[count-1];
1622  assert(last_pc->pc_offset() == PcDesc::upper_offset_limit, "sanity");
1623  last_pc->set_pc_offset(instructions_size() + 1);
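  // The final sentinel now lies just past the code, so it still bounds every
  // valid pc_offset (and, presumably, keeps the find_pc_desc() range tight).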
1624  for (; last_pc + 1 < scopes_pcs_end(); last_pc += 1) {
1625    // Fill any rounding gaps with copies of the last record.
1626    last_pc[1] = last_pc[0];
1627  }
1628  // The following assert could fail if sizeof(PcDesc) is not
1629  // an integral multiple of oopSize (the rounding term).
1630  // If it fails, change the logic to always allocate a multiple
1631  // of sizeof(PcDesc), and fill unused words with copies of *last_pc.
1632  assert(last_pc + 1 == scopes_pcs_end(), "must match exactly");
1633}
1634
1635void nmethod::copy_scopes_data(u_char* buffer, int size) {
1636  assert(scopes_data_size() >= size, "oob");
1637  memcpy(scopes_data_begin(), buffer, size);
1638}
1639
1640
1641#ifdef ASSERT
1642static PcDesc* linear_search(nmethod* nm, int pc_offset, bool approximate) {
1643  PcDesc* lower = nm->scopes_pcs_begin();
1644  PcDesc* upper = nm->scopes_pcs_end();
1645  lower += 1; // exclude initial sentinel
1646  PcDesc* res = NULL;
1647  for (PcDesc* p = lower; p < upper; p++) {
1648    NOT_PRODUCT(--nmethod_stats.pc_desc_tests);  // don't count this call to match_desc
1649    if (match_desc(p, pc_offset, approximate)) {
1650      if (res == NULL)
1651        res = p;
1652      else
1653        res = (PcDesc*) badAddress;
1654    }
1655  }
1656  return res;
1657}
1658#endif
1659
1660
1661// Finds a PcDesc with real-pc equal to "pc"
1662PcDesc* nmethod::find_pc_desc_internal(address pc, bool approximate) {
1663  address base_address = instructions_begin();
1664  if ((pc < base_address) ||
1665      (pc - base_address) >= (ptrdiff_t) PcDesc::upper_offset_limit) {
1666    return NULL;  // PC is wildly out of range
1667  }
1668  int pc_offset = (int) (pc - base_address);
1669
1670  // Check whether the PcDesc cache contains the desired PcDesc
1671  // (this has an almost 100% hit rate).
1672  PcDesc* res = _pc_desc_cache.find_pc_desc(pc_offset, approximate);
1673  if (res != NULL) {
1674    assert(res == linear_search(this, pc_offset, approximate), "cache ok");
1675    return res;
1676  }
1677
1678  // Fallback algorithm: quasi-linear search for the PcDesc
1679  // Find the last pc_offset less than the given offset.
1680  // The successor must be the required match, if there is a match at all.
1681  // (Use a fixed radix to avoid expensive affine pointer arithmetic.)
1682  PcDesc* lower = scopes_pcs_begin();
1683  PcDesc* upper = scopes_pcs_end();
1684  upper -= 1; // exclude final sentinel
1685  if (lower >= upper)  return NULL;  // native method; no PcDescs at all
1686
1687#define assert_LU_OK \
1688  /* invariant on lower..upper during the following search: */ \
1689  assert(lower->pc_offset() <  pc_offset, "sanity"); \
1690  assert(upper->pc_offset() >= pc_offset, "sanity")
1691  assert_LU_OK;
1692
1693  // Use the last successful return as a split point.
1694  PcDesc* mid = _pc_desc_cache.last_pc_desc();
1695  NOT_PRODUCT(++nmethod_stats.pc_desc_searches);
1696  if (mid->pc_offset() < pc_offset) {
1697    lower = mid;
1698  } else {
1699    upper = mid;
1700  }
1701
1702  // Take giant steps at first (4096, then 256, then 16, then 1)
1703  const int LOG2_RADIX = 4 /*smaller steps in debug mode:*/ debug_only(-1);
1704  const int RADIX = (1 << LOG2_RADIX);
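  // LOG2_RADIX evaluates to 4 in product builds and 3 in debug builds (the
  // debug_only(-1) above), so the step sizes are 4096/256/16 or 512/64/8.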
1705  for (int step = (1 << (LOG2_RADIX*3)); step > 1; step >>= LOG2_RADIX) {
1706    while ((mid = lower + step) < upper) {
1707      assert_LU_OK;
1708      NOT_PRODUCT(++nmethod_stats.pc_desc_searches);
1709      if (mid->pc_offset() < pc_offset) {
1710        lower = mid;
1711      } else {
1712        upper = mid;
1713        break;
1714      }
1715    }
1716    assert_LU_OK;
1717  }
1718
1719  // Sneak up on the value with a linear search of length ~16.
1720  while (true) {
1721    assert_LU_OK;
1722    mid = lower + 1;
1723    NOT_PRODUCT(++nmethod_stats.pc_desc_searches);
1724    if (mid->pc_offset() < pc_offset) {
1725      lower = mid;
1726    } else {
1727      upper = mid;
1728      break;
1729    }
1730  }
1731#undef assert_LU_OK
1732
1733  if (match_desc(upper, pc_offset, approximate)) {
1734    assert(upper == linear_search(this, pc_offset, approximate), "search ok");
1735    _pc_desc_cache.add_pc_desc(upper);
1736    return upper;
1737  } else {
1738    assert(NULL == linear_search(this, pc_offset, approximate), "search ok");
1739    return NULL;
1740  }
1741}
1742
1743
1744bool nmethod::check_all_dependencies() {
1745  bool found_check = false;
1746  // wholesale check of all dependencies
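  // NOT_DEBUG(break) below stops at the first failing dependency in product
  // builds; debug builds keep scanning so every dependency gets checked.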
1747  for (Dependencies::DepStream deps(this); deps.next(); ) {
1748    if (deps.check_dependency() != NULL) {
1749      found_check = true;
1750      NOT_DEBUG(break);
1751    }
1752  }
1753  return found_check;  // tell caller if we found anything
1754}
1755
1756bool nmethod::check_dependency_on(DepChange& changes) {
1757  // What has happened:
1758  // 1) a new class dependee has been added
1759  // 2) dependee and all its super classes have been marked
1760  bool found_check = false;  // set true if we are upset
1761  for (Dependencies::DepStream deps(this); deps.next(); ) {
1762    // Evaluate only relevant dependencies.
1763    if (deps.spot_check_dependency_at(changes) != NULL) {
1764      found_check = true;
1765      NOT_DEBUG(break);
1766    }
1767  }
1768  return found_check;
1769}
1770
1771bool nmethod::is_evol_dependent_on(klassOop dependee) {
1772  instanceKlass *dependee_ik = instanceKlass::cast(dependee);
1773  objArrayOop dependee_methods = dependee_ik->methods();
1774  for (Dependencies::DepStream deps(this); deps.next(); ) {
1775    if (deps.type() == Dependencies::evol_method) {
1776      methodOop method = deps.method_argument(0);
1777      for (int j = 0; j < dependee_methods->length(); j++) {
1778        if ((methodOop) dependee_methods->obj_at(j) == method) {
1779          // RC_TRACE macro has an embedded ResourceMark
1780          RC_TRACE(0x01000000,
1781            ("Found evol dependency of nmethod %s.%s(%s) compile_id=%d on method %s.%s(%s)",
1782            _method->method_holder()->klass_part()->external_name(),
1783            _method->name()->as_C_string(),
1784            _method->signature()->as_C_string(), compile_id(),
1785            method->method_holder()->klass_part()->external_name(),
1786            method->name()->as_C_string(),
1787            method->signature()->as_C_string()));
1788          if (TraceDependencies || LogCompilation)
1789            deps.log_dependency(dependee);
1790          return true;
1791        }
1792      }
1793    }
1794  }
1795  return false;
1796}
1797
1798// Called from mark_for_deoptimization, when dependee is invalidated.
1799bool nmethod::is_dependent_on_method(methodOop dependee) {
1800  for (Dependencies::DepStream deps(this); deps.next(); ) {
1801    if (deps.type() != Dependencies::evol_method)
1802      continue;
1803    methodOop method = deps.method_argument(0);
1804    if (method == dependee) return true;
1805  }
1806  return false;
1807}
1808
1809
1810bool nmethod::is_patchable_at(address instr_addr) {
1811  assert (code_contains(instr_addr), "wrong nmethod used");
1812  if (is_zombie()) {
1813    // a zombie may never be patched
1814    return false;
1815  }
1816  return true;
1817}
1818
1819
1820address nmethod::continuation_for_implicit_exception(address pc) {
1821  // Exception happened outside inline-cache check code => we are inside
1822  // an active nmethod => use cpc to determine a return address
1823  int exception_offset = pc - instructions_begin();
1824  int cont_offset = ImplicitExceptionTable(this).at( exception_offset );
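  // A zero offset means the implicit exception table has no continuation
  // entry for this pc; the debug-only block below dumps state before the
  // guarantee fires.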
1825#ifdef ASSERT
1826  if (cont_offset == 0) {
1827    Thread* thread = ThreadLocalStorage::get_thread_slow();
1828    ResetNoHandleMark rnm; // Might be called from LEAF/QUICK ENTRY
1829    HandleMark hm(thread);
1830    ResourceMark rm(thread);
1831    CodeBlob* cb = CodeCache::find_blob(pc);
1832    assert(cb != NULL && cb == this, "");
1833    tty->print_cr("implicit exception happened at " INTPTR_FORMAT, pc);
1834    print();
1835    method()->print_codes();
1836    print_code();
1837    print_pcs();
1838  }
1839#endif
1840  guarantee(cont_offset != 0, "unhandled implicit exception in compiled code");
1841  return instructions_begin() + cont_offset;
1842}
1843
1844
1845
1846void nmethod_init() {
1847  // make sure you didn't forget to adjust the filler fields
1848  assert(sizeof(nmFlags) <= 4,           "nmFlags occupies more than a word");
1849  assert(sizeof(nmethod) % oopSize == 0, "nmethod size must be multiple of a word");
1850}
1851
1852
1853//-------------------------------------------------------------------------------------------
1854
1855
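// nmethodLocker pins an nmethod against reclamation: the constructor and
// lock_nmethod() bump _lock_count, and the matching decrement comes from
// unlock_nmethod() (invoked from the destructor declared in nmethod.hpp).
// Typical use is a stack-allocated locker, e.g. "nmethodLocker nl(pc);",
// held while the nmethod's code or metadata is being examined.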
1856// QQQ might we make this work from a frame??
1857nmethodLocker::nmethodLocker(address pc) {
1858  CodeBlob* cb = CodeCache::find_blob(pc);
1859  guarantee(cb != NULL && cb->is_nmethod(), "bad pc for a nmethod found");
1860  _nm = (nmethod*)cb;
1861  lock_nmethod(_nm);
1862}
1863
1864void nmethodLocker::lock_nmethod(nmethod* nm) {
1865  if (nm == NULL)  return;
1866  Atomic::inc(&nm->_lock_count);
1867  guarantee(!nm->is_zombie(), "cannot lock a zombie method");
1868}
1869
1870void nmethodLocker::unlock_nmethod(nmethod* nm) {
1871  if (nm == NULL)  return;
1872  Atomic::dec(&nm->_lock_count);
1873  guarantee(nm->_lock_count >= 0, "unmatched nmethod lock/unlock");
1874}
1875
1876bool nmethod::is_deopt_pc(address pc) {
1877  bool ret =  pc == deopt_handler_begin();
1878  return ret;
1879}
1880
1881
1882// -----------------------------------------------------------------------------
1883// Verification
1884
1885void nmethod::verify() {
1886
1887  // Hmm. OSR methods can be deopted but not marked as zombie or
1888  // not_entrant; that seems odd.
1889
1890  if( is_zombie() || is_not_entrant() )
1891    return;
1892
1893  // Make sure all the entry points are correctly aligned for patching.
1894  NativeJump::check_verified_entry_alignment(entry_point(), verified_entry_point());
1895
1896  assert(method()->is_oop(), "must be valid");
1897
1898  ResourceMark rm;
1899
1900  if (!CodeCache::contains(this)) {
1901    fatal1("nmethod at " INTPTR_FORMAT " not in zone", this);
1902  }
1903
1904  if(is_native_method() )
1905    return;
1906
1907  nmethod* nm = CodeCache::find_nmethod(verified_entry_point());
1908  if (nm != this) {
1909    fatal1("findNMethod did not find this nmethod (" INTPTR_FORMAT ")", this);
1910  }
1911
1912  for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
1913    if (! p->verify(this)) {
1914      tty->print_cr("\t\tin nmethod at " INTPTR_FORMAT " (pcs)", this);
1915    }
1916  }
1917
1918  verify_scopes();
1919}
1920
1921
1922void nmethod::verify_interrupt_point(address call_site) {
1923  // This code does not work in release mode since
1924  // owns_lock only is available in debug mode.
1925  CompiledIC* ic = NULL;
1926  Thread *cur = Thread::current();
1927  if (CompiledIC_lock->owner() == cur ||
1928      ((cur->is_VM_thread() || cur->is_ConcurrentGC_thread()) &&
1929       SafepointSynchronize::is_at_safepoint())) {
1930    ic = CompiledIC_at(call_site);
1931    CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
1932  } else {
1933    MutexLocker ml_verify (CompiledIC_lock);
1934    ic = CompiledIC_at(call_site);
1935  }
1936  PcDesc* pd = pc_desc_at(ic->end_of_call());
1937  assert(pd != NULL, "PcDesc must exist");
1938  for (ScopeDesc* sd = new ScopeDesc(this, pd->scope_decode_offset(),
1939                                     pd->obj_decode_offset());
1940       !sd->is_top(); sd = sd->sender()) {
1941    sd->verify();
1942  }
1943}
1944
1945void nmethod::verify_scopes() {
1946  if( !method() ) return;       // Runtime stubs have no scope
1947  if (method()->is_native()) return; // Ignore stub methods.
1948  // Iterate through all interrupt points
1949  // and verify that the debug information is valid.
1950  RelocIterator iter((nmethod*)this);
1951  while (iter.next()) {
1952    address stub = NULL;
1953    switch (iter.type()) {
1954      case relocInfo::virtual_call_type:
1955        verify_interrupt_point(iter.addr());
1956        break;
1957      case relocInfo::opt_virtual_call_type:
1958        stub = iter.opt_virtual_call_reloc()->static_stub();
1959        verify_interrupt_point(iter.addr());
1960        break;
1961      case relocInfo::static_call_type:
1962        stub = iter.static_call_reloc()->static_stub();
1963        //verify_interrupt_point(iter.addr());
1964        break;
1965      case relocInfo::runtime_call_type:
1966        address destination = iter.reloc()->value();
1967        // Right now there is no way to find out which entries support
1968        // an interrupt point.  It would be nice if we had this
1969        // information in a table.
1970        break;
1971    }
1972    assert(stub == NULL || stub_contains(stub), "static call stub outside stub section");
1973  }
1974}
1975
1976
1977// -----------------------------------------------------------------------------
1978// Non-product code
1979#ifndef PRODUCT
1980
1981void nmethod::check_store() {
1982  // Make sure all oops in the compiled code are tenured
1983
1984  RelocIterator iter(this);
1985  while (iter.next()) {
1986    if (iter.type() == relocInfo::oop_type) {
1987      oop_Relocation* reloc = iter.oop_reloc();
1988      oop obj = reloc->oop_value();
1989      if (obj != NULL && !obj->is_perm()) {
1990        fatal("must be permanent oop in compiled code");
1991      }
1992    }
1993  }
1994}
1995
1996#endif // PRODUCT
1997
1998// Printing operations
1999
2000void nmethod::print() const {
2001  ResourceMark rm;
2002  ttyLocker ttyl;   // keep the following output all in one block
2003
2004  tty->print("Compiled ");
2005
2006  if (is_compiled_by_c1()) {
2007    tty->print("(c1) ");
2008  } else if (is_compiled_by_c2()) {
2009    tty->print("(c2) ");
2010  } else {
2011    tty->print("(nm) ");
2012  }
2013
2014  print_on(tty, "nmethod");
2015  tty->cr();
2016  if (WizardMode) {
2017    tty->print("((nmethod*) "INTPTR_FORMAT ") ", this);
2018    tty->print(" for method " INTPTR_FORMAT , (address)method());
2019    tty->print(" { ");
2020    if (version())        tty->print("v%d ", version());
2021    if (level())          tty->print("l%d ", level());
2022    if (is_in_use())      tty->print("in_use ");
2023    if (is_not_entrant()) tty->print("not_entrant ");
2024    if (is_zombie())      tty->print("zombie ");
2025    if (is_unloaded())    tty->print("unloaded ");
2026    tty->print_cr("}:");
2027  }
2028  if (size              () > 0) tty->print_cr(" total in heap  [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2029                                              (address)this,
2030                                              (address)this + size(),
2031                                              size());
2032  if (relocation_size   () > 0) tty->print_cr(" relocation     [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2033                                              relocation_begin(),
2034                                              relocation_end(),
2035                                              relocation_size());
2036  if (code_size         () > 0) tty->print_cr(" main code      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2037                                              code_begin(),
2038                                              code_end(),
2039                                              code_size());
2040  if (stub_size         () > 0) tty->print_cr(" stub code      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2041                                              stub_begin(),
2042                                              stub_end(),
2043                                              stub_size());
2044  if (consts_size       () > 0) tty->print_cr(" constants      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2045                                              consts_begin(),
2046                                              consts_end(),
2047                                              consts_size());
2048  if (scopes_data_size  () > 0) tty->print_cr(" scopes data    [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2049                                              scopes_data_begin(),
2050                                              scopes_data_end(),
2051                                              scopes_data_size());
2052  if (scopes_pcs_size   () > 0) tty->print_cr(" scopes pcs     [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2053                                              scopes_pcs_begin(),
2054                                              scopes_pcs_end(),
2055                                              scopes_pcs_size());
2056  if (dependencies_size () > 0) tty->print_cr(" dependencies   [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2057                                              dependencies_begin(),
2058                                              dependencies_end(),
2059                                              dependencies_size());
2060  if (handler_table_size() > 0) tty->print_cr(" handler table  [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2061                                              handler_table_begin(),
2062                                              handler_table_end(),
2063                                              handler_table_size());
2064  if (nul_chk_table_size() > 0) tty->print_cr(" nul chk table  [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2065                                              nul_chk_table_begin(),
2066                                              nul_chk_table_end(),
2067                                              nul_chk_table_size());
2068  if (oops_size         () > 0) tty->print_cr(" oops           [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2069                                              oops_begin(),
2070                                              oops_end(),
2071                                              oops_size());
2072}
2073
2074void nmethod::print_code() {
2075  HandleMark hm;
2076  ResourceMark m;
2077  Disassembler::decode(this);
2078}
2079
2080
2081#ifndef PRODUCT
2082
2083void nmethod::print_scopes() {
2084  // Find the first pc desc for all scopes in the code and print it.
2085  ResourceMark rm;
2086  for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
2087    if (p->scope_decode_offset() == DebugInformationRecorder::serialized_null)
2088      continue;
2089
2090    ScopeDesc* sd = scope_desc_at(p->real_pc(this));
2091    sd->print_on(tty, p);
2092  }
2093}
2094
2095void nmethod::print_dependencies() {
2096  ResourceMark rm;
2097  ttyLocker ttyl;   // keep the following output all in one block
2098  tty->print_cr("Dependencies:");
2099  for (Dependencies::DepStream deps(this); deps.next(); ) {
2100    deps.print_dependency();
2101    klassOop ctxk = deps.context_type();
2102    if (ctxk != NULL) {
2103      Klass* k = Klass::cast(ctxk);
2104      if (k->oop_is_instance() && ((instanceKlass*)k)->is_dependent_nmethod(this)) {
2105        tty->print_cr("   [nmethod<=klass]%s", k->external_name());
2106      }
2107    }
2108    deps.log_dependency();  // put it into the xml log also
2109  }
2110}
2111
2112
2113void nmethod::print_relocations() {
2114  ResourceMark m;       // in case methods get printed via the debugger
2115  tty->print_cr("relocations:");
2116  RelocIterator iter(this);
2117  iter.print();
2118  if (UseRelocIndex) {
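    // When present, the reloc index occupies the tail of the relocation
    // area: the last jint holds the index size in bytes and the index
    // entries sit immediately below it.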
2119    jint* index_end   = (jint*)relocation_end() - 1;
2120    jint  index_size  = *index_end;
2121    jint* index_start = (jint*)( (address)index_end - index_size );
2122    tty->print_cr("    index @" INTPTR_FORMAT ": index_size=%d", index_start, index_size);
2123    if (index_size > 0) {
2124      jint* ip;
2125      for (ip = index_start; ip+2 <= index_end; ip += 2)
2126        tty->print_cr("  (%d %d) addr=" INTPTR_FORMAT " @" INTPTR_FORMAT,
2127                      ip[0],
2128                      ip[1],
2129                      header_end()+ip[0],
2130                      relocation_begin()-1+ip[1]);
2131      for (; ip < index_end; ip++)
2132        tty->print_cr("  (%d ?)", ip[0]);
2133      tty->print_cr("          @" INTPTR_FORMAT ": index_size=%d", ip, *ip); ip++;  // avoid unsequenced *ip++ alongside ip in the same call
2134      tty->print_cr("reloc_end @" INTPTR_FORMAT ":", ip);
2135    }
2136  }
2137}
2138
2139
2140void nmethod::print_pcs() {
2141  ResourceMark m;       // in case methods get printed via debugger
2142  tty->print_cr("pc-bytecode offsets:");
2143  for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
2144    p->print(this);
2145  }
2146}
2147
2148#endif // PRODUCT
2149
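// Describe the first relocation found in the given code range; returns
// "other" if only unrecognized relocation types were seen and NULL if the
// range contains no relocations at all.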
2150const char* nmethod::reloc_string_for(u_char* begin, u_char* end) {
2151  RelocIterator iter(this, begin, end);
2152  bool have_one = false;
2153  while (iter.next()) {
2154    have_one = true;
2155    switch (iter.type()) {
2156        case relocInfo::none:                  return "no_reloc";
2157        case relocInfo::oop_type: {
2158          stringStream st;
2159          oop_Relocation* r = iter.oop_reloc();
2160          oop obj = r->oop_value();
2161          st.print("oop(");
2162          if (obj == NULL) st.print("NULL");
2163          else obj->print_value_on(&st);
2164          st.print(")");
2165          return st.as_string();
2166        }
2167        case relocInfo::virtual_call_type:     return "virtual_call";
2168        case relocInfo::opt_virtual_call_type: return "optimized virtual_call";
2169        case relocInfo::static_call_type:      return "static_call";
2170        case relocInfo::static_stub_type:      return "static_stub";
2171        case relocInfo::runtime_call_type:     return "runtime_call";
2172        case relocInfo::external_word_type:    return "external_word";
2173        case relocInfo::internal_word_type:    return "internal_word";
2174        case relocInfo::section_word_type:     return "section_word";
2175        case relocInfo::poll_type:             return "poll";
2176        case relocInfo::poll_return_type:      return "poll_return";
2177        case relocInfo::type_mask:             return "type_bit_mask";
2178    }
2179  }
2180  return have_one ? "other" : NULL;
2181}
2182
2183// Return the last scope in (begin..end]
2184ScopeDesc* nmethod::scope_desc_in(address begin, address end) {
2185  PcDesc* p = pc_desc_near(begin+1);
2186  if (p != NULL && p->real_pc(this) <= end) {
2187    return new ScopeDesc(this, p->scope_decode_offset(),
2188                         p->obj_decode_offset());
2189  }
2190  return NULL;
2191}
2192
2193void nmethod::print_code_comment_on(outputStream* st, int column, u_char* begin, u_char* end) {
2194  // First, find an oopmap in (begin, end].
2195  // We use the odd half-closed interval so that oop maps and scope descs
2196  // which are tied to the byte after a call are printed with the call itself.
2197  address base = instructions_begin();
2198  OopMapSet* oms = oop_maps();
2199  if (oms != NULL) {
2200    for (int i = 0, imax = oms->size(); i < imax; i++) {
2201      OopMap* om = oms->at(i);
2202      address pc = base + om->offset();
2203      if (pc > begin) {
2204        if (pc <= end) {
2205          st->move_to(column);
2206          st->print("; ");
2207          om->print_on(st);
2208        }
2209        break;
2210      }
2211    }
2212  }
2213
2214  // Print any debug info present at this pc.
2215  ScopeDesc* sd  = scope_desc_in(begin, end);
2216  if (sd != NULL) {
2217    st->move_to(column);
2218    if (sd->bci() == SynchronizationEntryBCI) {
2219      st->print(";*synchronization entry");
2220    } else {
2221      if (sd->method().is_null()) {
2222        st->print("method is NULL");
2223      } else if (sd->method()->is_native()) {
2224        st->print("method is native");
2225      } else {
2226        address bcp  = sd->method()->bcp_from(sd->bci());
2227        Bytecodes::Code bc = Bytecodes::java_code_at(bcp);
2228        st->print(";*%s", Bytecodes::name(bc));
2229        switch (bc) {
2230        case Bytecodes::_invokevirtual:
2231        case Bytecodes::_invokespecial:
2232        case Bytecodes::_invokestatic:
2233        case Bytecodes::_invokeinterface:
2234          {
2235            Bytecode_invoke* invoke = Bytecode_invoke_at(sd->method(), sd->bci());
2236            st->print(" ");
2237            if (invoke->name() != NULL)
2238              invoke->name()->print_symbol_on(st);
2239            else
2240              st->print("<UNKNOWN>");
2241            break;
2242          }
2243        case Bytecodes::_getfield:
2244        case Bytecodes::_putfield:
2245        case Bytecodes::_getstatic:
2246        case Bytecodes::_putstatic:
2247          {
2248            methodHandle sdm = sd->method();
2249            Bytecode_field* field = Bytecode_field_at(sdm(), sdm->bcp_from(sd->bci()));
2250            constantPoolOop sdmc = sdm->constants();
2251            symbolOop name = sdmc->name_ref_at(field->index());
2252            st->print(" ");
2253            if (name != NULL)
2254              name->print_symbol_on(st);
2255            else
2256              st->print("<UNKNOWN>");
2257          }
2258        }
2259      }
2260    }
2261
2262    // Print all scopes
2263    for (;sd != NULL; sd = sd->sender()) {
2264      st->move_to(column);
2265      st->print("; -");
2266      if (sd->method().is_null()) {
2267        st->print("method is NULL @%d", sd->bci());
2268      } else {
2269        sd->method()->print_short_name(st);
2270        int lineno = sd->method()->line_number_from_bci(sd->bci());
2271        if (lineno != -1) {
2272          st->print("@%d (line %d)", sd->bci(), lineno);
2273        } else {
2274          st->print("@%d", sd->bci());
2275        }
2276      }
2277      st->cr();
2278    }
2279  }
2280
2281  // Print relocation information
2282  const char* str = reloc_string_for(begin, end);
2283  if (str != NULL) {
2284    if (sd != NULL) st->cr();
2285    st->move_to(column);
2286    st->print(";   {%s}", str);
2287  }
2288  int cont_offset = ImplicitExceptionTable(this).at(begin - instructions_begin());
2289  if (cont_offset != 0) {
2290    st->move_to(column);
2291    st->print("; implicit exception: dispatches to " INTPTR_FORMAT, instructions_begin() + cont_offset);
2292  }
2293
2294}
2295
2296#ifndef PRODUCT
2297
2298void nmethod::print_value_on(outputStream* st) const {
2299  print_on(st, "nmethod");
2300}
2301
2302void nmethod::print_calls(outputStream* st) {
2303  RelocIterator iter(this);
2304  while (iter.next()) {
2305    switch (iter.type()) {
2306    case relocInfo::virtual_call_type:
2307    case relocInfo::opt_virtual_call_type: {
2308      VerifyMutexLocker mc(CompiledIC_lock);
2309      CompiledIC_at(iter.reloc())->print();
2310      break;
2311    }
2312    case relocInfo::static_call_type:
2313      st->print_cr("Static call at " INTPTR_FORMAT, iter.reloc()->addr());
2314      compiledStaticCall_at(iter.reloc())->print();
2315      break;
2316    }
2317  }
2318}
2319
2320void nmethod::print_handler_table() {
2321  ExceptionHandlerTable(this).print();
2322}
2323
2324void nmethod::print_nul_chk_table() {
2325  ImplicitExceptionTable(this).print(instructions_begin());
2326}
2327
2328void nmethod::print_statistics() {
2329  ttyLocker ttyl;
2330  if (xtty != NULL)  xtty->head("statistics type='nmethod'");
2331  nmethod_stats.print_native_nmethod_stats();
2332  nmethod_stats.print_nmethod_stats();
2333  DebugInformationRecorder::print_statistics();
2334  nmethod_stats.print_pc_stats();
2335  Dependencies::print_statistics();
2336  if (xtty != NULL)  xtty->tail("statistics");
2337}
2338
2339#endif // PRODUCT
2340