fprofiler.hpp revision 3465:d2a62e0f25eb
/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_FPROFILER_HPP
#define SHARE_VM_RUNTIME_FPROFILER_HPP

#include "runtime/timer.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "thread_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "thread_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "thread_bsd.inline.hpp"
#endif

// a simple flat profiler for Java


// Forward declarations of classes defined in this header file
class ThreadProfiler;
class ThreadProfilerMark;
class FlatProfiler;
class IntervalData;

// Declarations of classes defined only in the implementation.
class ProfilerNode;
class FlatProfilerTask;

enum TickPosition {
  tp_code,
  tp_native
};

// One of these is constructed as we enter an interesting region and
// destroyed as we exit it.  While we are in the region, ticks are
// attributed to that region.
class ThreadProfilerMark: public StackObj {
public:
  // For now, the only thread-specific region is the class loader.
  enum Region { noRegion, classLoaderRegion, extraRegion, maxRegion };

  ThreadProfilerMark(Region)  KERNEL_RETURN;
  ~ThreadProfilerMark()       KERNEL_RETURN;

private:
  ThreadProfiler* _pp;
  Region _r;
};
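
// Usage sketch (illustrative, not part of the original header): a caller
// attributes ticks to a region by placing a mark on the stack for the
// duration of the work.  The enclosing function below is hypothetical.
//
//   void load_class_from_stream(...) {
//     ThreadProfilerMark tpm(ThreadProfilerMark::classLoaderRegion);
//     // ... parsing and loading work; ticks taken here are charged to
//     // the class loader region ...
//   }  // ~ThreadProfilerMark() ends the region when tpm goes out of scope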

#ifndef FPROF_KERNEL

class IntervalData VALUE_OBJ_CLASS_SPEC {
  // Just to keep these things all together
private:
  int _interpreted;
  int _compiled;
  int _native;
  int _compiling;
public:
  int interpreted() {
    return _interpreted;
  }
  int compiled() {
    return _compiled;
  }
  int native() {
    return _native;
  }
  int compiling() {
    return _compiling;
  }
  int total() {
    return (interpreted() + compiled() + native() + compiling());
  }
  void inc_interpreted() {
    _interpreted += 1;
  }
  void inc_compiled() {
    _compiled += 1;
  }
  void inc_native() {
    _native += 1;
  }
  void inc_compiling() {
    _compiling += 1;
  }
  void reset() {
    _interpreted = 0;
    _compiled = 0;
    _native = 0;
    _compiling = 0;
  }
  static void print_header(outputStream* st);
  void print_data(outputStream* st);
};
#endif // FPROF_KERNEL

class ThreadProfiler: public CHeapObj<mtInternal> {
public:
  ThreadProfiler()    KERNEL_RETURN;
  ~ThreadProfiler()   KERNEL_RETURN;

  // Resets the profiler
  void reset()        KERNEL_RETURN;

  // Activates the profiler for a certain thread
  void engage()       KERNEL_RETURN;

  // Deactivates the profiler
  void disengage()    KERNEL_RETURN;

  // Prints the collected profiling information
  void print(const char* thread_name) KERNEL_RETURN;

  // Garbage Collection Support
  void oops_do(OopClosure* f)         KERNEL_RETURN;

#ifndef FPROF_KERNEL
private:
  // for recording ticks.
  friend class ProfilerNode;
  char* area_bottom; // preallocated area for pnodes
  char* area_top;
  char* area_limit;
  static int            table_size;
  ProfilerNode** table;

private:
  void record_interpreted_tick(JavaThread* thread, frame fr, TickPosition where, int* ticks);
  void record_compiled_tick   (JavaThread* thread, frame fr, TickPosition where);
  void interpreted_update(methodOop method, TickPosition where);
  void compiled_update   (methodOop method, TickPosition where);
  void stub_update       (methodOop method, const char* name, TickPosition where);
  void adapter_update    (TickPosition where);

  void runtime_stub_update(const CodeBlob* stub, const char* name, TickPosition where);
  void unknown_compiled_update    (const CodeBlob* cb, TickPosition where);

  void vm_update    (TickPosition where);
  void vm_update    (const char* name, TickPosition where);

  void record_tick_for_running_frame(JavaThread* thread, frame fr);
  void record_tick_for_calling_frame(JavaThread* thread, frame fr);

  void initialize();

  static int  entry(int value);


private:
  friend class FlatProfiler;
  void record_tick(JavaThread* thread);
  bool engaged;
  // so we can do percentages for this thread, and quick checks for activity
  int thread_ticks;
  int compiler_ticks;
  int interpreter_ticks;

public:
  void inc_thread_ticks() { thread_ticks += 1; }

private:
  friend class ThreadProfilerMark;
  // counters for thread-specific regions
  bool region_flag[ThreadProfilerMark::maxRegion];
  int class_loader_ticks;
  int extra_ticks;

private:
  // other thread-specific regions
  int blocked_ticks;
  enum UnknownTickSites {
      ut_null_method,
      ut_vtable_stubs,
      ut_running_frame,
      ut_calling_frame,
      ut_no_pc,
      ut_no_last_Java_frame,
      ut_unknown_thread_state,
      ut_end
  };
  int unknown_ticks_array[ut_end];
  int unknown_ticks() {
    int result = 0;
    for (int ut = 0; ut < ut_end; ut += 1) {
      result += unknown_ticks_array[ut];
    }
    return result;
  }

  elapsedTimer timer;

  // For interval timing
private:
  IntervalData _interval_data;
  IntervalData interval_data() {
    return _interval_data;
  }
  IntervalData* interval_data_ref() {
    return &_interval_data;
  }
#endif // FPROF_KERNEL
};

class FlatProfiler: AllStatic {
public:
  static void reset() KERNEL_RETURN ;
  static void engage(JavaThread* mainThread, bool fullProfile) KERNEL_RETURN ;
  static void disengage() KERNEL_RETURN ;
  static void print(int unused) KERNEL_RETURN ;
  static bool is_active() KERNEL_RETURN_(false) ;

  // This is NULL if each thread has its own thread profiler,
  // else this is the single thread profiler used by all threads.
  // In particular it makes a difference during garbage collection,
  // where you only want to traverse each thread profiler once.
  static ThreadProfiler* get_thread_profiler() KERNEL_RETURN_(NULL);

  // Garbage Collection Support
  static void oops_do(OopClosure* f) KERNEL_RETURN ;
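
  // Illustrative sketch (an assumption, not code from this header) of why
  // get_thread_profiler() matters during GC: a single shared profiler should
  // be traversed exactly once, otherwise each thread's own profiler is
  // traversed via its oops_do().  The per-thread loop below is hypothetical
  // pseudocode.
  //
  //   ThreadProfiler* shared = FlatProfiler::get_thread_profiler();
  //   if (shared != NULL) {
  //     shared->oops_do(f);            // one shared profiler: visit once
  //   } else {
  //     // for each JavaThread jt: traverse jt's ThreadProfiler via oops_do(f)
  //   }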

  // Support for the disassembler to inspect the PCRecorder

  // Returns the start address for a given pc.
  // NULL is returned if the PCRecorder is inactive.
  static address bucket_start_for(address pc) KERNEL_RETURN_(NULL);

  enum { MillisecsPerTick = 10 };   // ms per profiling tick

  // Returns the number of ticks recorded for the bucket
  // the pc belongs to.
  static int bucket_count_for(address pc) KERNEL_RETURN_(0);

#ifndef FPROF_KERNEL

 private:
  static bool full_profile() {
    return full_profile_flag;
  }

  friend class ThreadProfiler;
  // The following group of ticks covers everything that's not attributed to individual Java methods.
  static int  received_gc_ticks;      // ticks during which GC was active
  static int vm_operation_ticks;      // total ticks in vm_operations other than GC
  static int threads_lock_ticks;      // the number of times we couldn't get the Threads_lock without blocking
  static int      blocked_ticks;      // ticks when the thread was blocked
  static int class_loader_ticks;      // total ticks in the class loader
  static int        extra_ticks;      // total ticks for extra, temporary measurements
  static int     compiler_ticks;      // total ticks in compilation
  static int  interpreter_ticks;      // ticks in unknown interpreted methods
  static int        deopt_ticks;      // ticks in deoptimization
  static int      unknown_ticks;      // ticks that cannot be categorized
  static int     received_ticks;      // ticks that were received by the task
  static int    delivered_ticks;      // ticks that were delivered by the task
  static int non_method_ticks() {
    return
      ( received_gc_ticks
      + vm_operation_ticks
      + deopt_ticks
      + threads_lock_ticks
      + blocked_ticks
      + compiler_ticks
      + interpreter_ticks
      + unknown_ticks );
  }
  static elapsedTimer timer;

  // Counts of each of the byte codes
  static int*           bytecode_ticks;
  static int*           bytecode_ticks_stub;
  static void print_byte_code_statistics();

  // the ticks below are for continuous profiling (to adjust recompilation, etc.)
  static int          all_ticks;      // total count of ticks received so far
  static int      all_int_ticks;      // ticks in interpreter
  static int     all_comp_ticks;      // ticks in compiled code (+ native)
  static bool full_profile_flag;      // collecting full profile?

  // to accumulate thread-specific data
  // if we aren't profiling individual threads.
  static ThreadProfiler* thread_profiler;
  static ThreadProfiler* vm_thread_profiler;

  static void allocate_table();

  // The task that periodically interrupts things.
  friend class FlatProfilerTask;
  static FlatProfilerTask* task;
  static void record_vm_operation();
  static void record_vm_tick();
  static void record_thread_ticks();

  // For interval analysis
 private:
  static int interval_ticks_previous;  // delivered_ticks from the last interval
  static void interval_record_thread(ThreadProfiler* tp); // extract ticks from ThreadProfiler.
  static void interval_print();       // print interval data.
  static void interval_reset();       // reset interval data.
  enum { interval_print_size = 10 };
  static IntervalData* interval_data;
#endif // FPROF_KERNEL
};
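
// Usage sketch (illustrative, not part of the original header): the flat
// profiler is driven by the VM rather than by user code, but the typical
// lifecycle against the static interface above looks roughly like this.
// The surrounding startup/shutdown hooks are hypothetical.
//
//   // at startup, when flat profiling is requested (e.g. via -Xprof):
//   FlatProfiler::engage(main_thread, /* fullProfile */ true);
//
//   // ... application runs; FlatProfilerTask delivers ticks roughly every
//   // MillisecsPerTick milliseconds ...
//
//   // at VM shutdown:
//   FlatProfiler::disengage();
//   FlatProfiler::print(0);   // the int argument is unused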

#endif // SHARE_VM_RUNTIME_FPROFILER_HPP