// nmethod.hpp revision 9287:40bd4478a362
/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_CODE_NMETHOD_HPP
#define SHARE_VM_CODE_NMETHOD_HPP

#include "code/codeBlob.hpp"
#include "code/pcDesc.hpp"
#include "oops/metadata.hpp"

// This class is used internally by nmethods, to cache
// exception/pc/handler information.

class ExceptionCache : public CHeapObj<mtCode> {
  friend class VMStructs;
 private:
  enum { cache_size = 16 };
  Klass*   _exception_type;
  address  _pc[cache_size];
  address  _handler[cache_size];
  int      _count;
  ExceptionCache* _next;

  address pc_at(int index)                     { assert(index >= 0 && index < count(),""); return _pc[index]; }
  void    set_pc_at(int index, address a)      { assert(index >= 0 && index < cache_size,""); _pc[index] = a; }
  address handler_at(int index)                { assert(index >= 0 && index < count(),""); return _handler[index]; }
  void    set_handler_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _handler[index] = a; }
  int     count()                              { return _count; }
  void    increment_count()                    { _count++; }

 public:

  ExceptionCache(Handle exception, address pc, address handler);

  Klass*    exception_type()                { return _exception_type; }
  ExceptionCache* next()                    { return _next; }
  void      set_next(ExceptionCache *ec)    { _next = ec; }

  address match(Handle exception, address pc);
  bool    match_exception_with_space(Handle exception);
  address test_address(address addr);
  bool    add_address_and_handler(address addr, address handler);
};
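
// A minimal lookup sketch (illustrative only, not part of the real API):
// an nmethod keeps a linked list of ExceptionCache nodes, one per exception
// Klass, and a hit must match both the exception type and the faulting pc.
// Here `nm`, `exception` and `pc` are assumed to come from the caller;
// exception_cache() and add_handler_for_exception_and_pc() are declared on
// nmethod below.
//
//   for (ExceptionCache* ec = nm->exception_cache(); ec != NULL; ec = ec->next()) {
//     address handler = ec->match(exception, pc);  // NULL on a miss
//     if (handler != NULL) return handler;         // cache hit
//   }
//   // Miss: compute the real handler, then cache it for next time via
//   // nm->add_handler_for_exception_and_pc(exception, pc, handler).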


// cache pc descs found in earlier inquiries
class PcDescCache VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;
 private:
  enum { cache_size = 4 };
  // The array elements MUST be volatile! Several threads may modify
  // and read from the cache concurrently; without volatile,
  // find_pc_desc_internal has returned wrong results, because the
  // C++ compiler (namely xlC12) may duplicate C++ field accesses.
  typedef PcDesc* PcDescPtr;
  volatile PcDescPtr _pc_descs[cache_size]; // last cache_size pc_descs found
 public:
  PcDescCache() { debug_only(_pc_descs[0] = NULL); }
  void    reset_to(PcDesc* initial_pc_desc);
  PcDesc* find_pc_desc(int pc_offset, bool approximate);
  void    add_pc_desc(PcDesc* pc_desc);
  PcDesc* last_pc_desc() { return _pc_descs[0]; }
};
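
// The cache is consulted before any full PcDesc search: nmethod::find_pc_desc
// (below) first checks last_pc_desc() and only falls back to
// find_pc_desc_internal() when the cached entry's pc_offset does not match.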


// nmethods (native methods) are the compiled code versions of Java methods.
//
// An nmethod contains:
//  - header                 (the nmethod structure)
//  [Relocation]
//  - relocation information
//  - constant part          (doubles, longs and floats used in nmethod)
//  - oop table
//  [Code]
//  - code body
//  - exception handler
//  - stub code
//  [Debugging information]
//  - oop array
//  - data array
//  - pcs
//  [Exception handler table]
//  - handler entry point array
//  [Implicit Null Pointer exception table]
//  - implicit null table array

class Dependencies;
class ExceptionHandlerTable;
class ImplicitExceptionTable;
class AbstractCompiler;
class xmlStream;

class nmethod : public CodeBlob {
  friend class VMStructs;
  friend class NMethodSweeper;
  friend class CodeCache;  // scavengable oops
 private:

  // GC support to help figure out if an nmethod has been
  // cleaned/unloaded by the current GC.
  static unsigned char _global_unloading_clock;

  // Shared fields for all nmethods
  Method*   _method;
  int       _entry_bci;        // != InvocationEntryBci if this nmethod is an on-stack replacement method
  jmethodID _jmethod_id;       // Cache of method()->jmethod_id()

#if INCLUDE_JVMCI
  // Needed to keep nmethods alive that are not the default nmethod for the associated Method.
  oop       _jvmci_installed_code;
  oop       _speculation_log;
#endif

  // To support simple linked-list chaining of nmethods:
  nmethod*  _osr_link;         // from InstanceKlass::osr_nmethods_head

  union {
    // Used by G1 to chain nmethods.
    nmethod* _unloading_next;
    // Used by non-G1 GCs to chain nmethods.
    nmethod* _scavenge_root_link; // from CodeCache::scavenge_root_nmethods
  };

  static nmethod* volatile _oops_do_mark_nmethods;
  nmethod*        volatile _oops_do_mark_link;

  AbstractCompiler* _compiler; // The compiler which compiled this nmethod

  // offsets for entry points
  address _entry_point;                      // entry point with class check
  address _verified_entry_point;             // entry point without class check
  address _osr_entry_point;                  // entry point for on stack replacement

  // Offsets for different nmethod parts
  int _exception_offset;
  // All deoptees will resume execution at the location described by
  // this offset.
  int _deoptimize_offset;
  // All deoptees at a MethodHandle call site will resume execution
  // at the location described by this offset.
  int _deoptimize_mh_offset;
  // Offset of the unwind handler if it exists
  int _unwind_handler_offset;

  int _consts_offset;
  int _stub_offset;
  int _oops_offset;                       // offset to where embedded oop table begins (inside data)
  int _metadata_offset;                   // embedded meta data table
  int _scopes_data_offset;
  int _scopes_pcs_offset;
  int _dependencies_offset;
  int _handler_table_offset;
  int _nul_chk_table_offset;
  int _nmethod_end_offset;

  // location in frame (offset for sp) that deopt can store the original
  // pc during a deopt.
  int _orig_pc_offset;

  int _compile_id;                           // which compilation made this nmethod
  int _comp_level;                           // compilation level

  // protected by CodeCache_lock
  bool _has_flushed_dependencies;            // Used for maintenance of dependencies (CodeCache_lock)

  bool _marked_for_reclamation;              // Used by NMethodSweeper (set only by sweeper)
  bool _marked_for_deoptimization;           // Used for stack deoptimization

  // used by jvmti to track if an unload event has been posted for this nmethod.
  bool _unload_reported;

  // set during construction
  unsigned int _has_unsafe_access:1;         // May fault due to unsafe access.
  unsigned int _has_method_handle_invokes:1; // Has this method MethodHandle invokes?
  unsigned int _lazy_critical_native:1;      // Lazy JNI critical native
  unsigned int _has_wide_vectors:1;          // Preserve wide vectors at safepoints

  // Protected by Patching_lock
  volatile unsigned char _state;             // {in_use, not_entrant, zombie, unloaded}

  volatile unsigned char _unloading_clock;   // Incremented after GC unloaded/cleaned the nmethod

#ifdef ASSERT
  bool _oops_are_stale;  // indicates that it's no longer safe to access oops section
#endif

  jbyte _scavenge_root_state;

#if INCLUDE_RTM_OPT
  // RTM state at compile time. Used during deoptimization to decide
  // whether to restart collecting RTM locking abort statistic again.
  RTMState _rtm_state;
#endif

  // Nmethod Flushing lock. If non-zero, then the nmethod is not removed
  // and is not made into a zombie. However, once the nmethod is made into
  // a zombie, it will be locked one final time if CompiledMethodUnload
  // event processing needs to be done.
  volatile jint _lock_count;

  // not_entrant method removal. Each mark_sweep pass will update
  // this mark to the current sweep invocation count if it is seen on the
  // stack.  A not_entrant method can be removed when there are no
  // more activations, i.e., when the _stack_traversal_mark is less than
  // the current sweep traversal index.
  long _stack_traversal_mark;

  // The _hotness_counter indicates the hotness of a method. The higher
  // the value, the hotter the method. The hotness counter of an nmethod is
  // set to [(ReservedCodeCacheSize / (1024 * 1024)) * 2] each time the method
  // is active while stack scanning (mark_active_nmethods()). The hotness
  // counter is decreased (by 1) while sweeping.
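  // For example, with ReservedCodeCacheSize = 240M the counter is reset to
  // (240 * 2) = 480 on each stack scan and then decays by one per sweep.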
  int _hotness_counter;

  ExceptionCache *_exception_cache;
  PcDescCache     _pc_desc_cache;

  // These are used for compiled synchronized native methods to
  // locate the owner and stack slot for the BasicLock so that we can
  // properly revoke the bias of the owner if necessary. They are
  // needed because there is no debug information for compiled native
  // wrappers and the oop maps are insufficient to allow
  // frame::retrieve_receiver() to work. Currently they are expected
  // to be byte offsets from the Java stack pointer for maximum code
  // sharing between platforms. Note that currently biased locking
  // will never cause Class instances to be biased but this code
  // handles the static synchronized case as well.
  // JVMTI's GetLocalInstance() also uses these offsets to find the receiver
  // for non-static native wrapper frames.
  ByteSize _native_receiver_sp_offset;
  ByteSize _native_basic_lock_sp_offset;

  friend class nmethodLocker;

  // For native wrappers
  nmethod(Method* method,
          int nmethod_size,
          int compile_id,
          CodeOffsets* offsets,
          CodeBuffer *code_buffer,
          int frame_size,
          ByteSize basic_lock_owner_sp_offset, /* synchronized natives only */
          ByteSize basic_lock_sp_offset,       /* synchronized natives only */
          OopMapSet* oop_maps);

  // Creation support
  nmethod(Method* method,
          int nmethod_size,
          int compile_id,
          int entry_bci,
          CodeOffsets* offsets,
          int orig_pc_offset,
          DebugInformationRecorder *recorder,
          Dependencies* dependencies,
          CodeBuffer *code_buffer,
          int frame_size,
          OopMapSet* oop_maps,
          ExceptionHandlerTable* handler_table,
          ImplicitExceptionTable* nul_chk_table,
          AbstractCompiler* compiler,
          int comp_level
#if INCLUDE_JVMCI
          , Handle installed_code,
          Handle speculation_log
#endif
          );

  // helper methods
  void* operator new(size_t size, int nmethod_size, int comp_level) throw();

  const char* reloc_string_for(u_char* begin, u_char* end);
  // Returns true if this thread changed the state of the nmethod or
  // false if another thread performed the transition.
  bool make_not_entrant_or_zombie(unsigned int state);
  void inc_decompile_count();

  // Used to manipulate the exception cache
  void add_exception_cache_entry(ExceptionCache* new_entry);
  ExceptionCache* exception_cache_entry_for_exception(Handle exception);

  // Inform external interfaces that a compiled method has been unloaded
  void post_compiled_method_unload();

  // Initialize fields to their default values
  void init_defaults();

 public:
  // create nmethod with entry_bci
  static nmethod* new_nmethod(methodHandle method,
                              int compile_id,
                              int entry_bci,
                              CodeOffsets* offsets,
                              int orig_pc_offset,
                              DebugInformationRecorder* recorder,
                              Dependencies* dependencies,
                              CodeBuffer *code_buffer,
                              int frame_size,
                              OopMapSet* oop_maps,
                              ExceptionHandlerTable* handler_table,
                              ImplicitExceptionTable* nul_chk_table,
                              AbstractCompiler* compiler,
                              int comp_level
#if INCLUDE_JVMCI
                              , Handle installed_code = Handle(),
                              Handle speculation_log = Handle()
#endif
                             );

  static nmethod* new_native_nmethod(methodHandle method,
                                     int compile_id,
                                     CodeBuffer *code_buffer,
                                     int vep_offset,
                                     int frame_complete,
                                     int frame_size,
                                     ByteSize receiver_sp_offset,
                                     ByteSize basic_lock_sp_offset,
                                     OopMapSet* oop_maps);

  // accessors
  Method* method() const                          { return _method; }
  AbstractCompiler* compiler() const              { return _compiler; }

  // type info
  bool is_nmethod() const                         { return true; }
  bool is_java_method() const                     { return !method()->is_native(); }
  bool is_native_method() const                   { return method()->is_native(); }
  bool is_osr_method() const                      { return _entry_bci != InvocationEntryBci; }

  bool is_compiled_by_c1() const;
  bool is_compiled_by_jvmci() const;
  bool is_compiled_by_c2() const;
  bool is_compiled_by_shark() const;

  // boundaries for different parts
  address consts_begin          () const          { return           header_begin() + _consts_offset        ; }
  address consts_end            () const          { return           header_begin() +  code_offset()        ; }
  address insts_begin           () const          { return           header_begin() +  code_offset()        ; }
  address insts_end             () const          { return           header_begin() + _stub_offset          ; }
  address stub_begin            () const          { return           header_begin() + _stub_offset          ; }
  address stub_end              () const          { return           header_begin() + _oops_offset          ; }
  address exception_begin       () const          { return           header_begin() + _exception_offset     ; }
  address deopt_handler_begin   () const          { return           header_begin() + _deoptimize_offset    ; }
  address deopt_mh_handler_begin() const          { return           header_begin() + _deoptimize_mh_offset ; }
  address unwind_handler_begin  () const          { return _unwind_handler_offset != -1 ? (header_begin() + _unwind_handler_offset) : NULL; }
  oop*    oops_begin            () const          { return (oop*)   (header_begin() + _oops_offset)         ; }
  oop*    oops_end              () const          { return (oop*)   (header_begin() + _metadata_offset)     ; }

  Metadata** metadata_begin   () const            { return (Metadata**)  (header_begin() + _metadata_offset)     ; }
  Metadata** metadata_end     () const            { return (Metadata**)  (header_begin() + _scopes_data_offset)  ; }

  address scopes_data_begin     () const          { return           header_begin() + _scopes_data_offset   ; }
  address scopes_data_end       () const          { return           header_begin() + _scopes_pcs_offset    ; }
  PcDesc* scopes_pcs_begin      () const          { return (PcDesc*)(header_begin() + _scopes_pcs_offset   ); }
  PcDesc* scopes_pcs_end        () const          { return (PcDesc*)(header_begin() + _dependencies_offset) ; }
  address dependencies_begin    () const          { return           header_begin() + _dependencies_offset  ; }
  address dependencies_end      () const          { return           header_begin() + _handler_table_offset ; }
  address handler_table_begin   () const          { return           header_begin() + _handler_table_offset ; }
  address handler_table_end     () const          { return           header_begin() + _nul_chk_table_offset ; }
  address nul_chk_table_begin   () const          { return           header_begin() + _nul_chk_table_offset ; }
  address nul_chk_table_end     () const          { return           header_begin() + _nmethod_end_offset   ; }

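  // Every part above is located as header_begin() plus a stored offset, so
  // each size below is just the difference of two adjacent offsets; for
  // example, stub_size() works out to (_oops_offset - _stub_offset).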
  // Sizes
  int consts_size       () const                  { return            consts_end       () -            consts_begin       (); }
  int insts_size        () const                  { return            insts_end        () -            insts_begin        (); }
  int stub_size         () const                  { return            stub_end         () -            stub_begin         (); }
  int oops_size         () const                  { return (address)  oops_end         () - (address)  oops_begin         (); }
  int metadata_size     () const                  { return (address)  metadata_end     () - (address)  metadata_begin     (); }
  int scopes_data_size  () const                  { return            scopes_data_end  () -            scopes_data_begin  (); }
  int scopes_pcs_size   () const                  { return (intptr_t) scopes_pcs_end   () - (intptr_t) scopes_pcs_begin   (); }
  int dependencies_size () const                  { return            dependencies_end () -            dependencies_begin (); }
  int handler_table_size() const                  { return            handler_table_end() -            handler_table_begin(); }
  int nul_chk_table_size() const                  { return            nul_chk_table_end() -            nul_chk_table_begin(); }

  int total_size        () const;

  void dec_hotness_counter()        { _hotness_counter--; }
  void set_hotness_counter(int val) { _hotness_counter = val; }
  int  hotness_counter() const      { return _hotness_counter; }

  // Containment
  bool consts_contains       (address addr) const { return consts_begin       () <= addr && addr < consts_end       (); }
  bool insts_contains        (address addr) const { return insts_begin        () <= addr && addr < insts_end        (); }
  bool stub_contains         (address addr) const { return stub_begin         () <= addr && addr < stub_end         (); }
  bool oops_contains         (oop*    addr) const { return oops_begin         () <= addr && addr < oops_end         (); }
  bool metadata_contains     (Metadata** addr) const   { return metadata_begin     () <= addr && addr < metadata_end     (); }
  bool scopes_data_contains  (address addr) const { return scopes_data_begin  () <= addr && addr < scopes_data_end  (); }
  bool scopes_pcs_contains   (PcDesc* addr) const { return scopes_pcs_begin   () <= addr && addr < scopes_pcs_end   (); }
  bool handler_table_contains(address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
  bool nul_chk_table_contains(address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }

  // entry points
  address entry_point() const                     { return _entry_point;             } // normal entry point
  address verified_entry_point() const            { return _verified_entry_point;    } // if klass is correct

  enum { in_use       = 0,   // executable nmethod
         not_entrant  = 1,   // marked for deoptimization but activations may still exist,
                             // will be transformed to zombie when all activations are gone
         zombie       = 2,   // no activations exist, nmethod is ready for purge
         unloaded     = 3 }; // there should be no activations, should not be called,
                             // will be transformed to zombie immediately
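
  // A sketch of the typical state transitions (the actual changes are made
  // by make_not_entrant_or_zombie(), make_unloaded() and flush()):
  //
  //   in_use --> not_entrant --> zombie --> (flushed)
  //      |                         ^
  //      +-------> unloaded -------+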

  // flag accessing and manipulation
  bool  is_in_use() const                         { return _state == in_use; }
  bool  is_alive() const                          { return _state == in_use || _state == not_entrant; }
  bool  is_not_entrant() const                    { return _state == not_entrant; }
  bool  is_zombie() const                         { return _state == zombie; }
  bool  is_unloaded() const                       { return _state == unloaded;   }

#if INCLUDE_RTM_OPT
  // rtm state accessing and manipulating
  RTMState  rtm_state() const                     { return _rtm_state; }
  void set_rtm_state(RTMState state)              { _rtm_state = state; }
#endif

  // Make the nmethod non entrant. The nmethod will continue to be
  // alive.  It is used when an uncommon trap happens.  Returns true
  // if this thread changed the state of the nmethod or false if
  // another thread performed the transition.
  bool  make_not_entrant() {
    assert(!method()->is_method_handle_intrinsic(), "Cannot make MH intrinsic not entrant");
    return make_not_entrant_or_zombie(not_entrant);
  }
  bool  make_zombie()      { return make_not_entrant_or_zombie(zombie); }

  // used by jvmti to track if the unload event has been reported
  bool  unload_reported()                         { return _unload_reported; }
  void  set_unload_reported()                     { _unload_reported = true; }

  void set_unloading_next(nmethod* next)          { _unloading_next = next; }
  nmethod* unloading_next()                       { return _unloading_next; }

  static unsigned char global_unloading_clock()   { return _global_unloading_clock; }
  static void increase_unloading_clock();

  void set_unloading_clock(unsigned char unloading_clock);
  unsigned char unloading_clock();

  bool  is_marked_for_deoptimization() const      { return _marked_for_deoptimization; }
  void  mark_for_deoptimization()                 { _marked_for_deoptimization = true; }

  void  make_unloaded(BoolObjectClosure* is_alive, oop cause);

  bool has_dependencies()                         { return dependencies_size() != 0; }
  void flush_dependencies(BoolObjectClosure* is_alive);
  bool has_flushed_dependencies()                 { return _has_flushed_dependencies; }
  void set_has_flushed_dependencies()             {
    assert(!has_flushed_dependencies(), "should only happen once");
    _has_flushed_dependencies = 1;
  }

  bool  is_marked_for_reclamation() const         { return _marked_for_reclamation; }
  void  mark_for_reclamation()                    { _marked_for_reclamation = 1; }

  bool  has_unsafe_access() const                 { return _has_unsafe_access; }
  void  set_has_unsafe_access(bool z)             { _has_unsafe_access = z; }

  bool  has_method_handle_invokes() const         { return _has_method_handle_invokes; }
  void  set_has_method_handle_invokes(bool z)     { _has_method_handle_invokes = z; }

  bool  is_lazy_critical_native() const           { return _lazy_critical_native; }
  void  set_lazy_critical_native(bool z)          { _lazy_critical_native = z; }

  bool  has_wide_vectors() const                  { return _has_wide_vectors; }
  void  set_has_wide_vectors(bool z)              { _has_wide_vectors = z; }

  int   comp_level() const                        { return _comp_level; }

  // Support for oops in scopes and relocs:
  // Note: index 0 is reserved for null.
  oop   oop_at(int index) const                   { return index == 0 ? (oop) NULL: *oop_addr_at(index); }
  oop*  oop_addr_at(int index) const {  // for GC
    // relocation indexes are biased by 1 (because 0 is reserved)
    assert(index > 0 && index <= oops_size(), "must be a valid non-zero index");
    assert(!_oops_are_stale, "oops are stale");
    return &oops_begin()[index - 1];
  }
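  // For example, oop_at(1) returns oops_begin()[0]; oop_at(0) always
  // returns NULL, since index 0 is reserved for null.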

  // Support for meta data in scopes and relocs:
  // Note: index 0 is reserved for null.
  Metadata*     metadata_at(int index) const      { return index == 0 ? NULL: *metadata_addr_at(index); }
  Metadata**  metadata_addr_at(int index) const {  // for GC
    // relocation indexes are biased by 1 (because 0 is reserved)
    assert(index > 0 && index <= metadata_size(), "must be a valid non-zero index");
    return &metadata_begin()[index - 1];
  }

  void copy_values(GrowableArray<jobject>* oops);
  void copy_values(GrowableArray<Metadata*>* metadata);

  // Relocation support
private:
  void fix_oop_relocations(address begin, address end, bool initialize_immediates);
  inline void initialize_immediate_oop(oop* dest, jobject handle);

public:
  void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); }
  void fix_oop_relocations()                           { fix_oop_relocations(NULL, NULL, false); }
  void verify_oop_relocations();

  bool is_at_poll_return(address pc);
  bool is_at_poll_or_poll_return(address pc);

  // Scavengable oop support
  bool  on_scavenge_root_list() const                  { return (_scavenge_root_state & 1) != 0; }
 protected:
  enum { sl_on_list = 0x01, sl_marked = 0x10 };
  void  set_on_scavenge_root_list()                    { _scavenge_root_state = sl_on_list; }
  void  clear_on_scavenge_root_list()                  { _scavenge_root_state = 0; }
  // assertion-checking and pruning logic uses the bits of _scavenge_root_state
#ifndef PRODUCT
  void  set_scavenge_root_marked()                     { _scavenge_root_state |= sl_marked; }
  void  clear_scavenge_root_marked()                   { _scavenge_root_state &= ~sl_marked; }
  bool  scavenge_root_not_marked()                     { return (_scavenge_root_state &~ sl_on_list) == 0; }
  // N.B. there is no positive marked query, and we only use the not_marked query for asserts.
#endif //PRODUCT
  nmethod* scavenge_root_link() const                  { return _scavenge_root_link; }
  void     set_scavenge_root_link(nmethod *n)          { _scavenge_root_link = n; }

 public:

  // Sweeper support
  long  stack_traversal_mark()                    { return _stack_traversal_mark; }
  void  set_stack_traversal_mark(long l)          { _stack_traversal_mark = l; }

  // Exception cache support
  ExceptionCache* exception_cache() const         { return _exception_cache; }
  void set_exception_cache(ExceptionCache *ec)    { _exception_cache = ec; }
  address handler_for_exception_and_pc(Handle exception, address pc);
  void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
  void clean_exception_cache(BoolObjectClosure* is_alive);

  // implicit exceptions support
  address continuation_for_implicit_exception(address pc);

  // On-stack replacement support
  int   osr_entry_bci() const                     { assert(is_osr_method(), "wrong kind of nmethod"); return _entry_bci; }
  address  osr_entry() const                      { assert(is_osr_method(), "wrong kind of nmethod"); return _osr_entry_point; }
  void  invalidate_osr_method();
  nmethod* osr_link() const                       { return _osr_link; }
  void     set_osr_link(nmethod *n)               { _osr_link = n; }

  // tells whether frames described by this nmethod can be deoptimized
  // note: native wrappers cannot be deoptimized.
  bool can_be_deoptimized() const { return is_java_method(); }

  // Inline cache support
  void clear_inline_caches();
  void clear_ic_stubs();
  void cleanup_inline_caches();
  bool inlinecache_check_contains(address addr) const {
    return (addr >= code_begin() && addr < verified_entry_point());
  }

  // Verify calls to dead methods have been cleaned.
  void verify_clean_inline_caches();
  // Verify and count cached icholder relocations.
  int  verify_icholder_relocations();
  // Check that all metadata is still alive
  void verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive);

  // unlink and deallocate this nmethod
  // Only NMethodSweeper class is expected to use this. NMethodSweeper is not
  // expected to use any other private methods/data in this class.

 protected:
  void flush();

 public:
  // When true is returned, it is unsafe to remove this nmethod even if
  // it is a zombie, since the VM or the ServiceThread might still be
  // using it.
  bool is_locked_by_vm() const                    { return _lock_count > 0; }

  // See comment at definition of _stack_traversal_mark
  void mark_as_seen_on_stack();
  bool can_convert_to_zombie();

  // Evolution support. We make old (discarded) compiled methods point to new Method*s.
  void set_method(Method* method) { _method = method; }

#if INCLUDE_JVMCI
  oop jvmci_installed_code() { return _jvmci_installed_code; }
  char* jvmci_installed_code_name(char* buf, size_t buflen);
  void set_jvmci_installed_code(oop installed_code) { _jvmci_installed_code = installed_code; }
  void maybe_invalidate_installed_code();
  oop speculation_log() { return _speculation_log; }
  void set_speculation_log(oop speculation_log) { _speculation_log = speculation_log; }
#endif

  // GC support
  void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred);
  //  The parallel versions are used by G1.
  bool do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred);
  void do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred);

 private:
  //  Unload an nmethod if the *root object is dead.
  bool can_unload(BoolObjectClosure* is_alive, oop* root, bool unloading_occurred);
  bool unload_if_dead_at(RelocIterator *iter_at_oop, BoolObjectClosure* is_alive, bool unloading_occurred);

 public:
  void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map,
                                     OopClosure* f);
  void oops_do(OopClosure* f) { oops_do(f, false); }
  void oops_do(OopClosure* f, bool allow_zombie);
  bool detect_scavenge_root_oops();
  void verify_scavenge_root_oops() PRODUCT_RETURN;

  bool test_set_oops_do_mark();
  static void oops_do_marking_prologue();
  static void oops_do_marking_epilogue();
  static bool oops_do_marking_is_active() { return _oops_do_mark_nmethods != NULL; }
  bool test_oops_do_mark() { return _oops_do_mark_link != NULL; }

  // ScopeDesc for an instruction
  ScopeDesc* scope_desc_at(address pc);

 private:
  ScopeDesc* scope_desc_in(address begin, address end);

  address* orig_pc_addr(const frame* fr) { return (address*) ((address)fr->unextended_sp() + _orig_pc_offset); }

  PcDesc* find_pc_desc_internal(address pc, bool approximate);

  PcDesc* find_pc_desc(address pc, bool approximate) {
    PcDesc* desc = _pc_desc_cache.last_pc_desc();
    if (desc != NULL && desc->pc_offset() == pc - code_begin()) {
      return desc;
    }
    return find_pc_desc_internal(pc, approximate);
  }

 public:
  // ScopeDesc retrieval operation
  PcDesc* pc_desc_at(address pc)   { return find_pc_desc(pc, false); }
  // pc_desc_near returns the first PcDesc at or after the given pc.
  PcDesc* pc_desc_near(address pc) { return find_pc_desc(pc, true); }

 public:
  // copying of debugging information
  void copy_scopes_pcs(PcDesc* pcs, int count);
  void copy_scopes_data(address buffer, int size);

  // Deopt
  // Returns true if the PC is one we would expect if the frame is being deopted.
  bool is_deopt_pc      (address pc) { return is_deopt_entry(pc) || is_deopt_mh_entry(pc); }
  bool is_deopt_entry   (address pc);
  bool is_deopt_mh_entry(address pc) { return pc == deopt_mh_handler_begin(); }
  // Accessor/mutator for the original pc of a frame before it was deopted.
  address get_original_pc(const frame* fr) { return *orig_pc_addr(fr); }
  void    set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; }

  static address get_deopt_original_pc(const frame* fr);

  // MethodHandle
  bool is_method_handle_return(address return_pc);

  // jvmti support:
  void post_compiled_method_load_event();
  jmethodID get_and_cache_jmethod_id();

  // verify operations
  void verify();
  void verify_scopes();
  void verify_interrupt_point(address interrupt_point);

  // printing support
  void print()                          const;
  void print_code();
  void print_relocations()                        PRODUCT_RETURN;
  void print_pcs()                                PRODUCT_RETURN;
  void print_scopes()                             PRODUCT_RETURN;
  void print_dependencies()                       PRODUCT_RETURN;
  void print_value_on(outputStream* st) const     PRODUCT_RETURN;
  void print_calls(outputStream* st)              PRODUCT_RETURN;
  void print_handler_table()                      PRODUCT_RETURN;
  void print_nul_chk_table()                      PRODUCT_RETURN;
  void print_nmethod(bool print_code);

  // need to re-define this from CodeBlob else the overload hides it
  virtual void print_on(outputStream* st) const { CodeBlob::print_on(st); }
  void print_on(outputStream* st, const char* msg) const;

  // Logging
  void log_identity(xmlStream* log) const;
  void log_new_nmethod() const;
  void log_state_change() const;

  // Prints block-level comments, including nmethod specific block labels:
  virtual void print_block_comment(outputStream* stream, address block_begin) const {
    print_nmethod_labels(stream, block_begin);
    CodeBlob::print_block_comment(stream, block_begin);
  }
  void print_nmethod_labels(outputStream* stream, address block_begin) const;

  // Prints a comment for one native instruction (reloc info, pc desc)
  void print_code_comment_on(outputStream* st, int column, address begin, address end);
  static void print_statistics() PRODUCT_RETURN;

  // Compiler task identification.  Note that all OSR methods
  // are numbered in an independent sequence if CICountOSR is true,
  // and native method wrappers are also numbered independently if
  // CICountNative is true.
  int  compile_id() const                         { return _compile_id; }
  const char* compile_kind() const;

  // tells if any of this method's dependencies have been invalidated
  // (this is expensive!)
  static void check_all_dependencies(DepChange& changes);

  // tells if this compiled method is dependent on the given changes,
  // and the changes have invalidated it
  bool check_dependency_on(DepChange& changes);

  // Evolution support. Tells if this compiled method is dependent on any of
  // methods m() of class dependee, such that if m() in dependee is replaced,
  // this compiled method will have to be deoptimized.
  bool is_evol_dependent_on(Klass* dependee);

  // Fast breakpoint support. Tells if this compiled method is
  // dependent on the given method. Returns true if this nmethod
  // corresponds to the given method as well.
  bool is_dependent_on_method(Method* dependee);

  // is it ok to patch at address?
  bool is_patchable_at(address instr_address);

  // UseBiasedLocking support
  ByteSize native_receiver_sp_offset() {
    return _native_receiver_sp_offset;
  }
  ByteSize native_basic_lock_sp_offset() {
    return _native_basic_lock_sp_offset;
  }

  // support for code generation
  static int verified_entry_point_offset()        { return offset_of(nmethod, _verified_entry_point); }
  static int osr_entry_point_offset()             { return offset_of(nmethod, _osr_entry_point); }
  static int state_offset()                       { return offset_of(nmethod, _state); }

  // RedefineClasses support.   Mark metadata in nmethods as on_stack so that
  // redefine classes doesn't purge it.
  static void mark_on_stack(nmethod* nm) {
    nm->metadata_do(Metadata::mark_on_stack);
  }
  void metadata_do(void f(Metadata*));
};

// Locks an nmethod so its code will not get removed and it will not
// be made into a zombie, even if it is a not_entrant method. After the
// nmethod becomes a zombie, if CompiledMethodUnload event processing
// needs to be done, then lock_nmethod() is used directly to keep the
// generated code from being reused too early.
class nmethodLocker : public StackObj {
  nmethod* _nm;

 public:

  // note: nm can be NULL
  // Only JvmtiDeferredEvent::compiled_method_unload_event()
  // should pass zombie_ok == true.
  static void lock_nmethod(nmethod* nm, bool zombie_ok = false);
  static void unlock_nmethod(nmethod* nm); // (ditto)

  nmethodLocker(address pc); // derive nm from pc
  nmethodLocker(nmethod *nm) { _nm = nm; lock_nmethod(_nm); }
  nmethodLocker() { _nm = NULL; }
  ~nmethodLocker() { unlock_nmethod(_nm); }

  nmethod* code() { return _nm; }
  void set_code(nmethod* new_nm) {
    unlock_nmethod(_nm);   // note:  This works even if _nm==new_nm.
    _nm = new_nm;
    lock_nmethod(_nm);
  }
};

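// A minimal usage sketch (illustrative; assumes `nm` is a live nmethod
// obtained elsewhere):
//
//   {
//     nmethodLocker locker(nm);  // increments _lock_count, pinning the code
//     // ... nm cannot be flushed or turned into a zombie here ...
//   }                            // destructor unlocks; nm may now be reclaimed
//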
#endif // SHARE_VM_CODE_NMETHOD_HPP