memTracker.hpp revision 6692:4f9fa4b62c18
/*
 * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_SERVICES_MEM_TRACKER_HPP
#define SHARE_VM_SERVICES_MEM_TRACKER_HPP

#include "utilities/macros.hpp"

#if !INCLUDE_NMT

#include "utilities/ostream.hpp"

class BaselineOutputer : public StackObj {

};

class BaselineTTYOutputer : public BaselineOutputer {
  public:
    BaselineTTYOutputer(outputStream* st) { }
};

class MemTracker : AllStatic {
  public:
   enum ShutdownReason {
      NMT_shutdown_none,     // no shutdown requested
      NMT_shutdown_user,     // user requested shutdown
      NMT_normal,            // normal shutdown, process exit
      NMT_out_of_memory,     // shutdown due to out of memory
      NMT_initialization,    // shutdown due to initialization failure
      NMT_use_malloc_only,   // cannot combine NMT with the UseMallocOnly flag
      NMT_error_reporting,   // shutdown by vmError::report_and_die()
      NMT_out_of_generation, // ran out of the generation queue
      NMT_sequence_overflow  // sequence number overflow
   };

  class Tracker {
   public:
    void discard() { }

    void record(address addr, size_t size = 0, MEMFLAGS flags = mtNone, address pc = NULL) { }
    void record(address old_addr, address new_addr, size_t size,
      MEMFLAGS flags, address pc = NULL) { }
  };

  private:
   static Tracker  _tkr;


  public:
   static inline void init_tracking_options(const char* option_line) { }
   static inline bool is_on()   { return false; }
   static const char* reason()  { return "Native memory tracking is not implemented"; }
   static inline bool can_walk_stack() { return false; }

   static inline void bootstrap_single_thread() { }
   static inline void bootstrap_multi_thread() { }
   static inline void start() { }

   static inline void record_malloc(address addr, size_t size, MEMFLAGS flags,
        address pc = 0, Thread* thread = NULL) { }
   static inline void record_free(address addr, MEMFLAGS flags, Thread* thread = NULL) { }
   static inline void record_arena_size(address addr, size_t size) { }
   static inline void record_virtual_memory_reserve(address addr, size_t size,
        MEMFLAGS flags, address pc = 0, Thread* thread = NULL) { }
   static inline void record_virtual_memory_reserve_and_commit(address addr, size_t size,
        MEMFLAGS flags, address pc = 0, Thread* thread = NULL) { }
   static inline void record_virtual_memory_commit(address addr, size_t size,
        address pc = 0, Thread* thread = NULL) { }
   static inline void record_virtual_memory_release(address addr, size_t size,
        Thread* thread = NULL) { }
   static inline void record_virtual_memory_type(address base, MEMFLAGS flags,
        Thread* thread = NULL) { }
   static inline Tracker get_realloc_tracker() { return _tkr; }
   static inline Tracker get_virtual_memory_uncommit_tracker() { return _tkr; }
   static inline Tracker get_virtual_memory_release_tracker()  { return _tkr; }
   static inline bool baseline() { return false; }
   static inline bool has_baseline() { return false; }

   static inline void set_autoShutdown(bool value) { }
   static void shutdown(ShutdownReason reason) { }
   static inline bool shutdown_in_progress() { return false; }
   static bool print_memory_usage(BaselineOutputer& out, size_t unit,
            bool summary_only = true) { return false; }
   static bool compare_memory_usage(BaselineOutputer& out, size_t unit,
            bool summary_only = true) { return false; }

   static bool wbtest_wait_for_data_merge() { return false; }

   static inline void sync() { }
   static inline void thread_exiting(JavaThread* thread) { }
};


#else // !INCLUDE_NMT

#include "memory/allocation.hpp"
#include "runtime/globals.hpp"
#include "runtime/mutex.hpp"
#include "runtime/os.hpp"
#include "runtime/thread.hpp"
#include "services/memPtr.hpp"
#include "services/memRecorder.hpp"
#include "services/memSnapshot.hpp"
#include "services/memTrackWorker.hpp"

extern bool NMT_track_callsite;

#ifndef MAX_UNSIGNED_LONG
#define MAX_UNSIGNED_LONG    (unsigned long)(-1)
#endif

#ifdef ASSERT
  #define DEBUG_CALLER_PC  (NMT_track_callsite ? os::get_caller_pc(2) : 0)
#else
  #define DEBUG_CALLER_PC  0
#endif

// The thread closure walks threads to collect per-thread
// memory recorders at the NMT sync point
class SyncThreadRecorderClosure : public ThreadClosure {
 private:
  int _thread_count;

 public:
  SyncThreadRecorderClosure() {
    _thread_count = 0;
  }

  void do_thread(Thread* thread);
  int  get_thread_count() const {
    return _thread_count;
  }
};

class BaselineOutputer;
class MemSnapshot;
class MemTrackWorker;
class Thread;
/*
 * MemTracker is the 'gate' class to the native memory tracking runtime.
 */
class MemTracker : AllStatic {
  friend class GenerationData;
  friend class MemTrackWorker;
  friend class MemSnapshot;
  friend class SyncThreadRecorderClosure;

  // NMT state
  enum NMTStates {
    NMT_uninited,                        // not yet initialized
    NMT_bootstrapping_single_thread,     // bootstrapping, VM is in single thread mode
    NMT_bootstrapping_multi_thread,      // bootstrapping, VM is about to enter multi-thread mode
    NMT_started,                         // NMT fully started
    NMT_shutdown_pending,                // shutdown pending
    NMT_final_shutdown,                  // in final phase of shutdown
    NMT_shutdown                         // shutdown
  };

 public:
  class Tracker : public StackObj {
    friend class MemTracker;
   public:
    enum MemoryOperation {
      NoOp,                   // no op
      Malloc,                 // malloc
      Realloc,                // realloc
      Free,                   // free
      Reserve,                // virtual memory reserve
      Commit,                 // virtual memory commit
      ReserveAndCommit,       // virtual memory reserve and commit
      StackAlloc = ReserveAndCommit, // allocate thread stack
      Type,                   // assign virtual memory type
      Uncommit,               // virtual memory uncommit
      Release,                // virtual memory release
      ArenaSize,              // set arena size
      StackRelease            // release thread stack
    };


   protected:
    Tracker(MemoryOperation op, Thread* thr = NULL);

   public:
    void discard();

    void record(address addr, size_t size = 0, MEMFLAGS flags = mtNone, address pc = NULL);
    void record(address old_addr, address new_addr, size_t size,
      MEMFLAGS flags, address pc = NULL);

   private:
    bool            _need_thread_critical_lock;
    JavaThread*     _java_thread;
    MemoryOperation _op;          // memory operation
    jint            _seq;         // reserved sequence number
  };


 public:
  // native memory tracking level
  enum NMTLevel {
    NMT_off,              // native memory tracking is off
    NMT_summary,          // don't track callsite
    NMT_detail            // track callsite also
  };

   enum ShutdownReason {
     NMT_shutdown_none,     // no shutdown requested
     NMT_shutdown_user,     // user requested shutdown
     NMT_normal,            // normal shutdown, process exit
     NMT_out_of_memory,     // shutdown due to out of memory
     NMT_initialization,    // shutdown due to initialization failure
     NMT_use_malloc_only,   // cannot combine NMT with the UseMallocOnly flag
     NMT_error_reporting,   // shutdown by vmError::report_and_die()
     NMT_out_of_generation, // ran out of the generation queue
     NMT_sequence_overflow  // sequence number overflow
   };

 public:
  // initialize NMT tracking level from command line options, called
  // from VM command line parsing code
  static void init_tracking_options(const char* option_line);
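  // For illustration only (not part of the original header): assuming the
  // standard -XX:NativeMemoryTracking=[off|summary|detail] flag, argument
  // parsing would be expected to pass the flag's value straight through, e.g.
  //   MemTracker::init_tracking_options("summary");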

  // whether NMT is enabled to record memory activities
  static inline bool is_on() {
    return (_tracking_level >= NMT_summary &&
      _state >= NMT_bootstrapping_single_thread);
  }

  static inline enum NMTLevel tracking_level() {
    return _tracking_level;
  }

  // user-readable reason for shutting down NMT
  static const char* reason() {
    switch(_reason) {
      case NMT_shutdown_none:
        return "Native memory tracking is not enabled";
      case NMT_shutdown_user:
        return "Native memory tracking has been shutdown by user";
      case NMT_normal:
        return "Native memory tracking has been shutdown due to process exiting";
      case NMT_out_of_memory:
        return "Native memory tracking has been shutdown due to out of native memory";
      case NMT_initialization:
        return "Native memory tracking failed to initialize";
      case NMT_error_reporting:
        return "Native memory tracking has been shutdown due to error reporting";
      case NMT_out_of_generation:
        return "Native memory tracking has been shutdown due to running out of generation buffer";
      case NMT_sequence_overflow:
        return "Native memory tracking has been shutdown due to sequence number overflow";
      case NMT_use_malloc_only:
        return "Native memory tracking is not supported when UseMallocOnly is on";
      default:
        ShouldNotReachHere();
        return NULL;
    }
  }

  // test whether we can walk the native stack
  static bool can_walk_stack() {
  // native stack is not walkable during bootstrapping on SPARC
#if defined(SPARC)
    return (_state == NMT_started);
#else
    return (_state >= NMT_bootstrapping_single_thread && _state <= NMT_started);
#endif
  }

  // whether native memory tracking tracks callsites
  static inline bool track_callsite() { return _tracking_level == NMT_detail; }

  // NMT automatically shuts itself down under extreme situations by default.
  // When the value is set to false, NMT will try its best to stay alive,
  // even if it has to slow down the VM.
  static inline void set_autoShutdown(bool value) {
    AutoShutdownNMT = value;
    if (AutoShutdownNMT && _slowdown_calling_thread) {
      _slowdown_calling_thread = false;
    }
  }

  // shutdown native memory tracking capability. Native memory tracking
  // can be shut down by the VM when it encounters low memory scenarios.
  // The memory tracker should gracefully shut itself down, and preserve the
  // latest memory statistics for post mortem diagnosis.
  static void shutdown(ShutdownReason reason);

  // whether a shutdown has been requested
  static inline bool shutdown_in_progress() {
    return (_state >= NMT_shutdown_pending);
  }

  // bootstrap native memory tracking, so it can start to collect raw data
  // before the worker thread can start

  // the first phase of bootstrapping, while the VM is still in single-threaded mode
  static void bootstrap_single_thread();
  // the second phase of bootstrapping, the VM is about to enter, or is already in, multi-threaded mode
  static void bootstrap_multi_thread();


  // start() has to be called while the VM is still in single-threaded mode, but after
  // command line option parsing is done.
  static void start();

  // record a 'malloc' call
  static inline void record_malloc(address addr, size_t size, MEMFLAGS flags,
                            address pc = 0, Thread* thread = NULL) {
    Tracker tkr(Tracker::Malloc, thread);
    tkr.record(addr, size, flags, pc);
  }
  // record a 'free' call
  static inline void record_free(address addr, MEMFLAGS flags, Thread* thread = NULL) {
    Tracker tkr(Tracker::Free, thread);
    tkr.record(addr, 0, flags, DEBUG_CALLER_PC);
  }
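  // Illustrative sketch only (not part of the original header), showing how an
  // allocator wrapper would be expected to pair these two hooks; 'caller_pc'
  // is a hypothetical placeholder for the recorded call site:
  //   void* p = ::malloc(size);
  //   if (p != NULL) {
  //     MemTracker::record_malloc((address)p, size, mtInternal, caller_pc);
  //   }
  //   ...
  //   MemTracker::record_free((address)p, mtInternal);
  //   ::free(p);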

  static inline void record_arena_size(address addr, size_t size) {
    Tracker tkr(Tracker::ArenaSize);
    tkr.record(addr, size);
  }

  // record a virtual memory 'reserve' call
  static inline void record_virtual_memory_reserve(address addr, size_t size,
                     MEMFLAGS flags, address pc = 0, Thread* thread = NULL) {
    assert(size > 0, "Sanity check");
    Tracker tkr(Tracker::Reserve, thread);
    tkr.record(addr, size, flags, pc);
  }

  static inline void record_thread_stack(address addr, size_t size, Thread* thr,
                           address pc = 0) {
    Tracker tkr(Tracker::StackAlloc, thr);
    tkr.record(addr, size, mtThreadStack, pc);
  }

  static inline void release_thread_stack(address addr, size_t size, Thread* thr) {
    Tracker tkr(Tracker::StackRelease, thr);
    tkr.record(addr, size, mtThreadStack, DEBUG_CALLER_PC);
  }
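  // Illustrative pairing only (hypothetical caller, not part of the original
  // header): a thread's stack region would be registered when the thread
  // starts and released when it exits, e.g.
  //   MemTracker::record_thread_stack(stack_base - stack_size, stack_size, thread);
  //   ...
  //   MemTracker::release_thread_stack(stack_base - stack_size, stack_size, thread);
  // Since StackAlloc aliases ReserveAndCommit, the stack is accounted as a
  // reserved and committed mtThreadStack region.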

  // record a virtual memory 'commit' call
  static inline void record_virtual_memory_commit(address addr, size_t size,
                            address pc, Thread* thread = NULL) {
    Tracker tkr(Tracker::Commit, thread);
    tkr.record(addr, size, mtNone, pc);
  }

  static inline void record_virtual_memory_reserve_and_commit(address addr, size_t size,
    MEMFLAGS flags, address pc, Thread* thread = NULL) {
    Tracker tkr(Tracker::ReserveAndCommit, thread);
    tkr.record(addr, size, flags, pc);
  }

  static inline void record_virtual_memory_release(address addr, size_t size,
      Thread* thread = NULL) {
    if (is_on()) {
      Tracker tkr(Tracker::Release, thread);
      tkr.record(addr, size);
    }
  }

  // record memory type on virtual memory base address
  static inline void record_virtual_memory_type(address base, MEMFLAGS flags,
                            Thread* thread = NULL) {
    Tracker tkr(Tracker::Type);
    tkr.record(base, 0, flags);
  }
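  // Illustrative sketch only (not part of the original header): a typical
  // virtual memory region would be expected to flow through these hooks as
  //   MemTracker::record_virtual_memory_reserve(base, reserved_size, mtNone, pc);
  //   MemTracker::record_virtual_memory_type(base, mtJavaHeap);  // example type tag
  //   MemTracker::record_virtual_memory_commit(base, committed_size, pc);
  // while uncommit and release go through the pre-reserved trackers below.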

  // Get memory trackers for memory operations that can result in race conditions.
  // The memory tracker has to be obtained before realloc, virtual memory uncommit
  // and virtual memory release; call the tracker.record() method if the operation
  // succeeded, or tracker.discard() to abort the tracking.
  static inline Tracker get_realloc_tracker() {
    return Tracker(Tracker::Realloc);
  }

  static inline Tracker get_virtual_memory_uncommit_tracker() {
    return Tracker(Tracker::Uncommit);
  }

  static inline Tracker get_virtual_memory_release_tracker() {
    return Tracker(Tracker::Release);
  }
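  // Illustrative sketch only (not part of the original header) of the
  // obtain-then-record-or-discard protocol described above, using realloc as
  // the example (variable names are hypothetical):
  //   MemTracker::Tracker tkr = MemTracker::get_realloc_tracker();
  //   void* new_ptr = ::realloc(old_ptr, new_size);
  //   if (new_ptr != NULL) {
  //     tkr.record((address)old_ptr, (address)new_ptr, new_size, flags);
  //   } else {
  //     tkr.discard();   // abort tracking and give up the reserved sequence number
  //   }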


  // create memory baseline of current memory snapshot
  static bool baseline();
  // is there a memory baseline
  static bool has_baseline() {
    return _baseline.baselined();
  }

  // print memory usage from current snapshot
  static bool print_memory_usage(BaselineOutputer& out, size_t unit,
           bool summary_only = true);
  // compare memory usage between current snapshot and baseline
  static bool compare_memory_usage(BaselineOutputer& out, size_t unit,
           bool summary_only = true);
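  // Illustrative sketch only (not part of the original header): reporting is
  // expected to be driven by a diagnostic command (e.g. jcmd VM.native_memory),
  // roughly along the lines of
  //   BaselineTTYOutputer outputer(out_stream);    // out_stream: hypothetical stream
  //   MemTracker::print_memory_usage(outputer, K, true /* summary only */);
  // and, once MemTracker::baseline() has stored a baseline,
  //   MemTracker::compare_memory_usage(outputer, K, true);
  // where K is HotSpot's 1024-byte scale constant.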

  // the version for whitebox testing support; it ensures that all memory
  // activities before this method call are reflected in the snapshot
  // database.
  static bool wbtest_wait_for_data_merge();

  // sync is called within a global safepoint to synchronize NMT data
  static void sync();

  // called when a thread is about to exit
  static void thread_exiting(JavaThread* thread);

  // retrieve global snapshot
  static MemSnapshot* get_snapshot() {
    if (shutdown_in_progress()) {
      return NULL;
    }
    return _snapshot;
  }

  // print tracker stats
  NOT_PRODUCT(static void print_tracker_stats(outputStream* st);)
  NOT_PRODUCT(static void walk_stack(int toSkip, char* buf, int len);)

 private:
  // start the native memory tracking worker thread
  static bool start_worker(MemSnapshot* snapshot);

  // called by the worker thread to complete the shutdown process
  static void final_shutdown();

 protected:
  // retrieve the per-thread recorder of the specified thread.
  // if the recorder is full, it will be enqueued to the overflow
  // queue, and a new recorder is acquired from the recorder pool or a
  // new instance is created.
  // when thread == NULL, the global recorder is used
  static MemRecorder* get_thread_recorder(JavaThread* thread);

  // per-thread recorder pool
  static void release_thread_recorder(MemRecorder* rec);
  static void delete_all_pooled_recorders();

  // pending recorder queue. Recorders are queued to the pending queue
  // when they overflow or are collected at an NMT sync point.
  static void enqueue_pending_recorder(MemRecorder* rec);
  static MemRecorder* get_pending_recorders();
  static void delete_all_pending_recorders();

  // write a memory tracking record into a recorder
  static void write_tracking_record(address addr, MEMFLAGS type,
    size_t size, jint seq, address pc, JavaThread* thread);

  static bool is_single_threaded_bootstrap() {
    return _state == NMT_bootstrapping_single_thread;
  }

  static void check_NMT_load(Thread* thr) {
    assert(thr != NULL, "Sanity check");
    if (_slowdown_calling_thread && thr != _worker_thread) {
#ifdef _WINDOWS
      // On Windows, os::naked_yield() does not work as well
      // as a short sleep.
      os::naked_short_sleep(1);
#else
      os::naked_yield();
#endif
    }
  }

  static void inc_pending_op_count() {
    Atomic::inc(&_pending_op_count);
  }

  static void dec_pending_op_count() {
    Atomic::dec(&_pending_op_count);
    assert(_pending_op_count >= 0, "Sanity check");
  }


 private:
  // retrieve a pooled memory recorder, or create a new one if none is
  // available
  static MemRecorder* get_new_or_pooled_instance();
  static void create_memory_record(address addr, MEMFLAGS type,
                   size_t size, address pc, Thread* thread);
  static void create_record_in_recorder(address addr, MEMFLAGS type,
                   size_t size, address pc, JavaThread* thread);

  static void set_current_processing_generation(unsigned long generation) {
    _worker_thread_idle = false;
    _processing_generation = generation;
  }

  static void report_worker_idle() {
    _worker_thread_idle = true;
  }

 private:
  // global memory snapshot
  static MemSnapshot*     _snapshot;

  // a memory baseline of the snapshot
  static MemBaseline      _baseline;

  // query lock
  static Mutex*           _query_lock;

  // a thread can start to allocate memory before it is attached
  // to a VM 'Thread'; those memory activities are recorded here.
  // ThreadCritical is required to guard this global recorder.
  static MemRecorder* volatile _global_recorder;

  // main thread id
  debug_only(static intx   _main_thread_tid;)

  // pending recorders to be merged
  static MemRecorder* volatile     _merge_pending_queue;

  NOT_PRODUCT(static volatile jint   _pending_recorder_count;)

  // pooled memory recorders
  static MemRecorder* volatile     _pooled_recorders;

  // memory recorder pool management; uses the following
  // counters to determine if a released memory recorder
  // should be pooled

  // latest thread count
  static int               _thread_count;
  // pooled recorder count
  static volatile jint     _pooled_recorder_count;


  // worker thread to merge pending recorders into the snapshot
  static MemTrackWorker*  _worker_thread;

  // how many safepoints we have skipped without entering the sync point
  static int              _sync_point_skip_count;

  // whether the tracker is properly initialized
  static bool             _is_tracker_ready;
  // tracking level (off, summary and detail)
  static enum NMTLevel    _tracking_level;

  // current NMT state
  static volatile enum NMTStates   _state;
  // the reason for shutting down NMT
  static enum ShutdownReason       _reason;
  // the generation that NMT is processing
  static volatile unsigned long    _processing_generation;
  // although NMT is still processing the current generation, there are
  // no more recorders to process, so set the idle state
  static volatile bool             _worker_thread_idle;

  // whether NMT should slow down the calling thread to allow
  // the worker thread to catch up
  static volatile bool             _slowdown_calling_thread;

  // pending memory op count.
  // Certain memory ops need to pre-reserve a sequence number
  // before the memory operation can happen, to avoid race conditions.
  // See MemTracker::Tracker for details.
  static volatile jint             _pending_op_count;
};

#endif // !INCLUDE_NMT

#endif // SHARE_VM_SERVICES_MEM_TRACKER_HPP
