/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_FPROFILER_HPP
#define SHARE_VM_RUNTIME_FPROFILER_HPP

#include "utilities/macros.hpp"
#include "runtime/timer.hpp"

// a simple flat profiler for Java


// Forward declarations of classes defined in this header file
class ThreadProfiler;
class ThreadProfilerMark;
class FlatProfiler;
class IntervalData;

// Declarations of classes defined only in the implementation.
class ProfilerNode;
class FlatProfilerTask;

enum TickPosition {
  tp_code,
  tp_native
};

// A ThreadProfilerMark is constructed as we enter an interesting region
// and destroyed as we exit it.  While we are in the region, ticks are
// attributed to that region.
class ThreadProfilerMark: public StackObj {
public:
  // For now, the only thread-specific region is the class loader.
  enum Region { noRegion, classLoaderRegion, extraRegion, maxRegion };

  ThreadProfilerMark(Region)  NOT_FPROF_RETURN;
  ~ThreadProfilerMark()       NOT_FPROF_RETURN;

private:
  ThreadProfiler* _pp;
  Region _r;
};
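
// Illustrative usage sketch (not part of the declared interface): the
// enclosing function below is hypothetical; only ThreadProfilerMark and its
// Region enum come from this file.
//
//   void load_class_example() {
//     ThreadProfilerMark tpm(ThreadProfilerMark::classLoaderRegion);
//     // ... class loading work; ticks taken while tpm is live are
//     // attributed to the class loader region of this thread's profiler ...
//   }  // tpm's destructor closes the region here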

#if INCLUDE_FPROF

class IntervalData VALUE_OBJ_CLASS_SPEC {
  // Just to keep these things all together
private:
  int _interpreted;
  int _compiled;
  int _native;
  int _compiling;
public:
  int interpreted() {
    return _interpreted;
  }
  int compiled() {
    return _compiled;
  }
  int native() {
    return _native;
  }
  int compiling() {
    return _compiling;
  }
  int total() {
    return (interpreted() + compiled() + native() + compiling());
  }
  void inc_interpreted() {
    _interpreted += 1;
  }
  void inc_compiled() {
    _compiled += 1;
  }
  void inc_native() {
    _native += 1;
  }
  void inc_compiling() {
    _compiling += 1;
  }
  void reset() {
    _interpreted = 0;
    _compiled = 0;
    _native = 0;
    _compiling = 0;
  }
  static void print_header(outputStream* st);
  void print_data(outputStream* st);
};
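
// Illustrative sketch: accumulating and reporting one interval's worth of
// ticks with IntervalData. The outputStream* st is assumed, not declared here.
//
//   IntervalData id;
//   id.reset();                 // fields are not initialized by a constructor
//   id.inc_interpreted();       // one tick observed in interpreted code
//   id.inc_compiled();          // one tick observed in compiled code
//   int ticks = id.total();     // == 2
//   IntervalData::print_header(st);
//   id.print_data(st);
//   id.reset();                 // start the next interval
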
#endif // INCLUDE_FPROF

class ThreadProfiler: public CHeapObj<mtInternal> {
public:
  ThreadProfiler()    NOT_FPROF_RETURN;
  ~ThreadProfiler()   NOT_FPROF_RETURN;

  // Resets the profiler
  void reset()        NOT_FPROF_RETURN;

  // Activates the profiler for a certain thread
  void engage()       NOT_FPROF_RETURN;

  // Deactivates the profiler
  void disengage()    NOT_FPROF_RETURN;

  // Prints the collected profiling information
  void print(const char* thread_name) NOT_FPROF_RETURN;

  // Garbage Collection Support
  void oops_do(OopClosure* f)         NOT_FPROF_RETURN;

#if INCLUDE_FPROF
private:
  // for recording ticks.
  friend class ProfilerNode;
  char* area_bottom; // preallocated area for pnodes
  char* area_top;
  char* area_limit;
  static int            table_size;
  ProfilerNode** table;

private:
  void record_interpreted_tick(JavaThread* thread, frame fr, TickPosition where, int* ticks);
  void record_compiled_tick   (JavaThread* thread, frame fr, TickPosition where);
  void interpreted_update(Method* method, TickPosition where);
  void compiled_update   (Method* method, TickPosition where);
  void stub_update       (Method* method, const char* name, TickPosition where);
  void adapter_update    (TickPosition where);

  void runtime_stub_update(const CodeBlob* stub, const char* name, TickPosition where);
  void unknown_compiled_update    (const CodeBlob* cb, TickPosition where);

  void vm_update    (TickPosition where);
  void vm_update    (const char* name, TickPosition where);

  void record_tick_for_running_frame(JavaThread* thread, frame fr);
  void record_tick_for_calling_frame(JavaThread* thread, frame fr);

  void initialize();

  static int  entry(int value);


private:
  friend class FlatProfiler;
  void record_tick(JavaThread* thread);
  bool engaged;
  // so we can do percentages for this thread, and quick checks for activity
  int thread_ticks;
  int compiler_ticks;
  int interpreter_ticks;

public:
  void inc_thread_ticks() { thread_ticks += 1; }

private:
  friend class ThreadProfilerMark;
  // counters for thread-specific regions
  bool region_flag[ThreadProfilerMark::maxRegion];
  int class_loader_ticks;
  int extra_ticks;

private:
  // other thread-specific regions
  int blocked_ticks;
  enum UnknownTickSites {
      ut_null_method,
      ut_vtable_stubs,
      ut_running_frame,
      ut_calling_frame,
      ut_no_pc,
      ut_no_last_Java_frame,
      ut_unknown_thread_state,
      ut_end
  };
  int unknown_ticks_array[ut_end];
  int unknown_ticks() {
    int result = 0;
    for (int ut = 0; ut < ut_end; ut += 1) {
      result += unknown_ticks_array[ut];
    }
    return result;
  }

  elapsedTimer timer;

  // For interval timing
private:
  IntervalData _interval_data;
  IntervalData interval_data() {
    return _interval_data;
  }
  IntervalData* interval_data_ref() {
    return &_interval_data;
  }
#endif // INCLUDE_FPROF
};
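
// Illustrative sketch of the per-thread lifecycle exposed by the public
// interface above. The thread name and where the object is kept are assumed;
// record_tick() is invoked by FlatProfiler's periodic task, not by user code.
//
//   ThreadProfiler* tp = new ThreadProfiler();
//   tp->reset();                 // clear previously collected data
//   tp->engage();                // start attributing ticks to this thread
//   // ... the thread runs and ticks are recorded ...
//   tp->disengage();             // stop collecting
//   tp->print("main");           // report, labeled with the thread's name
//   delete tp;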

class FlatProfiler: AllStatic {
public:
  static void reset() NOT_FPROF_RETURN ;
  static void engage(JavaThread* mainThread, bool fullProfile) NOT_FPROF_RETURN ;
  static void disengage() NOT_FPROF_RETURN ;
  static void print(int unused) NOT_FPROF_RETURN ;
  static bool is_active() NOT_FPROF_RETURN_(false) ;

  // This is NULL if each thread has its own thread profiler,
  // else this is the single thread profiler used by all threads.
  // In particular it makes a difference during garbage collection,
  // where you only want to traverse each thread profiler once.
  static ThreadProfiler* get_thread_profiler() NOT_FPROF_RETURN_(NULL);
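
  // Illustrative sketch of the distinction documented above, e.g. for a GC
  // pass (the per-thread traversal in the else-branch is assumed, not shown):
  //
  //   ThreadProfiler* shared = FlatProfiler::get_thread_profiler();
  //   if (shared != NULL) {
  //     shared->oops_do(f);   // one shared profiler: traverse it exactly once
  //   } else {
  //     // each JavaThread owns its own ThreadProfiler; traverse per thread
  //   }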

  // Garbage Collection Support
  static void oops_do(OopClosure* f) NOT_FPROF_RETURN ;

  // Support for disassembler to inspect the PCRecorder

  // Returns the start address for a given pc
  // NULL is returned if the PCRecorder is inactive
  static address bucket_start_for(address pc) NOT_FPROF_RETURN_(NULL);

  enum { MillisecsPerTick = 10 };   // ms per profiling tick

  // Returns the number of ticks recorded for the bucket
  // pc belongs to.
  static int bucket_count_for(address pc) NOT_FPROF_RETURN_(0);
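
  // Illustrative sketch of how a disassembler might annotate an address using
  // the two bucket queries above (pc and the output handling are assumed):
  //
  //   address start = FlatProfiler::bucket_start_for(pc);
  //   if (start != NULL) {              // NULL means the PCRecorder is inactive
  //     int ticks = FlatProfiler::bucket_count_for(pc);
  //     // roughly ticks * MillisecsPerTick milliseconds spent in this bucket
  //   }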

#if INCLUDE_FPROF

 private:
  static bool full_profile() {
    return full_profile_flag;
  }

  friend class ThreadProfiler;
  // the following group of ticks covers everything that's not attributed to individual Java methods
  static int  received_gc_ticks;      // ticks during which gc was active
  static int vm_operation_ticks;      // total ticks in vm_operations other than GC
  static int threads_lock_ticks;      // the number of times we couldn't get the Threads_lock without blocking
  static int      blocked_ticks;      // ticks when the thread was blocked.
  static int class_loader_ticks;      // total ticks in class loader
  static int        extra_ticks;      // total ticks in the extra region, for temporary measurements
  static int     compiler_ticks;      // total ticks in compilation
  static int  interpreter_ticks;      // ticks in unknown interpreted method
  static int        deopt_ticks;      // ticks in deoptimization
  static int      unknown_ticks;      // ticks that cannot be categorized
  static int     received_ticks;      // ticks that were received by task
  static int    delivered_ticks;      // ticks that were delivered by task
  static int non_method_ticks() {
    return
      ( received_gc_ticks
      + vm_operation_ticks
      + deopt_ticks
      + threads_lock_ticks
      + blocked_ticks
      + compiler_ticks
      + interpreter_ticks
      + unknown_ticks );
  }
  static elapsedTimer timer;

  // Counts of each of the byte codes
  static int*           bytecode_ticks;
  static int*           bytecode_ticks_stub;
  static void print_byte_code_statistics();

  // the ticks below are for continuous profiling (to adjust recompilation, etc.)
  static int          all_ticks;      // total count of ticks received so far
  static int      all_int_ticks;      // ticks in interpreter
  static int     all_comp_ticks;      // ticks in compiled code (+ native)
  static bool full_profile_flag;      // collecting full profile?

  // to accumulate thread-specific data
  // if we aren't profiling individual threads.
  static ThreadProfiler* thread_profiler;
  static ThreadProfiler* vm_thread_profiler;

  static void allocate_table();

  // The task that periodically interrupts things.
  friend class FlatProfilerTask;
  static FlatProfilerTask* task;
  static void record_vm_operation();
  static void record_vm_tick();
  static void record_thread_ticks();

  // For interval analysis
 private:
  static int interval_ticks_previous;  // delivered_ticks from the last interval
  static void interval_record_thread(ThreadProfiler* tp); // extract ticks from ThreadProfiler.
  static void interval_print();       // print interval data.
  static void interval_reset();       // reset interval data.
  enum {interval_print_size = 10};
  static IntervalData* interval_data;
#endif // INCLUDE_FPROF
};
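
// Illustrative sketch of the whole-VM flow built from the static interface
// above. Where the JavaThread* comes from is assumed; 'false' requests a
// non-full profile.
//
//   FlatProfiler::reset();
//   FlatProfiler::engage(main_thread, false);  // starts the periodic tick task
//   // ... application runs; ticks are attributed to threads and methods ...
//   if (FlatProfiler::is_active()) {
//     FlatProfiler::disengage();               // stop sampling
//     FlatProfiler::print(0);                  // the argument is unused
//   }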

#endif // SHARE_VM_RUNTIME_FPROFILER_HPP