g1GCPhaseTimes.cpp revision 8638:767f36deb0dc
/*
 * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1Log.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "memory/allocation.hpp"
#include "runtime/os.hpp"

// Helper class for avoiding interleaved logging
class LineBuffer: public StackObj {

private:
  static const int BUFFER_LEN = 1024;
  static const int INDENT_CHARS = 3;
  char _buffer[BUFFER_LEN];
  int _indent_level;
  int _cur;

  void vappend(const char* format, va_list ap)  ATTRIBUTE_PRINTF(2, 0) {
    int res = vsnprintf(&_buffer[_cur], BUFFER_LEN - _cur, format, ap);
    if (res != -1) {
      _cur += res;
    } else {
      DEBUG_ONLY(warning("buffer too small in LineBuffer");)
      _buffer[BUFFER_LEN - 1] = 0;
      _cur = BUFFER_LEN; // vsnprintf above should not add to _buffer if we are called again
    }
  }

public:
  explicit LineBuffer(int indent_level): _indent_level(indent_level), _cur(0) {
    for (; (_cur < BUFFER_LEN && _cur < (_indent_level * INDENT_CHARS)); _cur++) {
      _buffer[_cur] = ' ';
    }
  }

#ifndef PRODUCT
  ~LineBuffer() {
    assert(_cur == _indent_level * INDENT_CHARS, "pending data in buffer - append_and_print_cr() not called?");
  }
#endif

  void append(const char* format, ...)  ATTRIBUTE_PRINTF(2, 3) {
    va_list ap;
    va_start(ap, format);
    vappend(format, ap);
    va_end(ap);
  }

  void print_cr() {
    gclog_or_tty->print_cr("%s", _buffer);
    _cur = _indent_level * INDENT_CHARS;
  }

  void append_and_print_cr(const char* format, ...)  ATTRIBUTE_PRINTF(2, 3) {
    va_list ap;
    va_start(ap, format);
    vappend(format, ap);
    va_end(ap);
    print_cr();
  }
};
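
// Illustrative usage (the phase name below is made up): a LineBuffer pre-fills
// INDENT_CHARS spaces per indent level and emits the whole line with a single
// gclog_or_tty->print_cr() call, so lines built by different GC threads cannot
// end up interleaved character-by-character in the log.
//
//   LineBuffer(1).append_and_print_cr("[%s: %.1lf ms]", "Example Phase", 1.2);
//   // prints "   [Example Phase: 1.2 ms]"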

template <class T>
class WorkerDataArray  : public CHeapObj<mtGC> {
  friend class G1GCParPhasePrinter;
  T*          _data;
  uint        _length;
  const char* _title;
  bool        _print_sum;
  int         _log_level;
  uint        _indent_level;
  bool        _enabled;

  WorkerDataArray<size_t>* _thread_work_items;

  NOT_PRODUCT(T uninitialized();)

  // We are caching the sum and average to only have to calculate them once.
  // This is not done in an MT-safe way. It is intended to allow single
  // threaded code to call sum() and average() multiple times in any order
  // without having to worry about the cost.
  bool   _has_new_data;
  T      _sum;
  T      _min;
  T      _max;
  double _average;

 public:
  WorkerDataArray(uint length, const char* title, bool print_sum, int log_level, uint indent_level) :
    _title(title), _length(0), _print_sum(print_sum), _log_level(log_level), _indent_level(indent_level),
    _has_new_data(true), _thread_work_items(NULL), _enabled(true) {
    assert(length > 0, "Must have some workers to store data for");
    _length = length;
    _data = NEW_C_HEAP_ARRAY(T, _length, mtGC);
  }

  ~WorkerDataArray() {
    FREE_C_HEAP_ARRAY(T, _data);
  }

  void link_thread_work_items(WorkerDataArray<size_t>* thread_work_items) {
    _thread_work_items = thread_work_items;
  }

  WorkerDataArray<size_t>* thread_work_items() { return _thread_work_items; }

  void set(uint worker_i, T value) {
    assert(worker_i < _length, err_msg("Worker %d is greater than max: %d", worker_i, _length));
    assert(_data[worker_i] == WorkerDataArray<T>::uninitialized(), err_msg("Overwriting data for worker %d in %s", worker_i, _title));
    _data[worker_i] = value;
    _has_new_data = true;
  }

  void set_thread_work_item(uint worker_i, size_t value) {
    assert(_thread_work_items != NULL, "No sub count");
    _thread_work_items->set(worker_i, value);
  }

  T get(uint worker_i) {
    assert(worker_i < _length, err_msg("Worker %d is greater than max: %d", worker_i, _length));
    assert(_data[worker_i] != WorkerDataArray<T>::uninitialized(), err_msg("No data added for worker %d", worker_i));
    return _data[worker_i];
  }

  void add(uint worker_i, T value) {
    assert(worker_i < _length, err_msg("Worker %d is greater than max: %d", worker_i, _length));
    assert(_data[worker_i] != WorkerDataArray<T>::uninitialized(), err_msg("No data to add to for worker %d", worker_i));
    _data[worker_i] += value;
    _has_new_data = true;
  }

  double average(uint active_threads) {
    calculate_totals(active_threads);
    return _average;
  }

  T sum(uint active_threads) {
    calculate_totals(active_threads);
    return _sum;
  }

  T minimum(uint active_threads) {
    calculate_totals(active_threads);
    return _min;
  }

  T maximum(uint active_threads) {
    calculate_totals(active_threads);
    return _max;
  }

  void reset() PRODUCT_RETURN;
  void verify(uint active_threads) PRODUCT_RETURN;

  void set_enabled(bool enabled) { _enabled = enabled; }

  int log_level() { return _log_level; }

 private:

  void calculate_totals(uint active_threads) {
    if (!_has_new_data) {
      return;
    }

    _sum = (T)0;
    _min = _data[0];
    _max = _min;
    assert(active_threads <= _length, "Wrong number of active threads");
    for (uint i = 0; i < active_threads; ++i) {
      T val = _data[i];
      _sum += val;
      _min = MIN2(_min, val);
      _max = MAX2(_max, val);
    }
    _average = (double)_sum / (double)active_threads;
    _has_new_data = false;
  }
};
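
// Sketch of the intended flow, for illustration only (n_workers, worker_id,
// elapsed_secs and active_workers are placeholder names):
//
//   WorkerDataArray<double>* times =
//       new WorkerDataArray<double>(n_workers, "Some Phase (ms)", true, G1Log::LevelFiner, 2);
//   times->reset();                              // debug builds: mark all slots uninitialized
//   times->set(worker_id, elapsed_secs);         // each active worker records its value once
//   times->verify(active_workers);               // debug builds: all active slots must be set
//   double avg = times->average(active_workers); // sum/min/max/average cached until new data arrives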

#ifndef PRODUCT

template <>
size_t WorkerDataArray<size_t>::uninitialized() {
  return (size_t)-1;
}

template <>
double WorkerDataArray<double>::uninitialized() {
  return -1.0;
}

template <class T>
void WorkerDataArray<T>::reset() {
  for (uint i = 0; i < _length; i++) {
    _data[i] = WorkerDataArray<T>::uninitialized();
  }
  if (_thread_work_items != NULL) {
    _thread_work_items->reset();
  }
}

template <class T>
void WorkerDataArray<T>::verify(uint active_threads) {
  if (!_enabled) {
    return;
  }

  assert(active_threads <= _length, "Wrong number of active threads");
  for (uint i = 0; i < active_threads; i++) {
    assert(_data[i] != WorkerDataArray<T>::uninitialized(),
        err_msg("Invalid data for worker %u in '%s'", i, _title));
  }
  if (_thread_work_items != NULL) {
    _thread_work_items->verify(active_threads);
  }
}

#endif

G1GCPhaseTimes::G1GCPhaseTimes(uint max_gc_threads) :
  _max_gc_threads(max_gc_threads)
{
  assert(max_gc_threads > 0, "Must have some GC threads");

  _gc_par_phases[GCWorkerStart] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Start (ms)", false, G1Log::LevelFiner, 2);
  _gc_par_phases[ExtRootScan] = new WorkerDataArray<double>(max_gc_threads, "Ext Root Scanning (ms)", true, G1Log::LevelFiner, 2);

  // Root scanning phases
  _gc_par_phases[ThreadRoots] = new WorkerDataArray<double>(max_gc_threads, "Thread Roots (ms)", true, G1Log::LevelFinest, 3);
  _gc_par_phases[StringTableRoots] = new WorkerDataArray<double>(max_gc_threads, "StringTable Roots (ms)", true, G1Log::LevelFinest, 3);
  _gc_par_phases[UniverseRoots] = new WorkerDataArray<double>(max_gc_threads, "Universe Roots (ms)", true, G1Log::LevelFinest, 3);
  _gc_par_phases[JNIRoots] = new WorkerDataArray<double>(max_gc_threads, "JNI Handles Roots (ms)", true, G1Log::LevelFinest, 3);
  _gc_par_phases[ObjectSynchronizerRoots] = new WorkerDataArray<double>(max_gc_threads, "ObjectSynchronizer Roots (ms)", true, G1Log::LevelFinest, 3);
  _gc_par_phases[FlatProfilerRoots] = new WorkerDataArray<double>(max_gc_threads, "FlatProfiler Roots (ms)", true, G1Log::LevelFinest, 3);
  _gc_par_phases[ManagementRoots] = new WorkerDataArray<double>(max_gc_threads, "Management Roots (ms)", true, G1Log::LevelFinest, 3);
  _gc_par_phases[SystemDictionaryRoots] = new WorkerDataArray<double>(max_gc_threads, "SystemDictionary Roots (ms)", true, G1Log::LevelFinest, 3);
  _gc_par_phases[CLDGRoots] = new WorkerDataArray<double>(max_gc_threads, "CLDG Roots (ms)", true, G1Log::LevelFinest, 3);
  _gc_par_phases[JVMTIRoots] = new WorkerDataArray<double>(max_gc_threads, "JVMTI Roots (ms)", true, G1Log::LevelFinest, 3);
  _gc_par_phases[CMRefRoots] = new WorkerDataArray<double>(max_gc_threads, "CM RefProcessor Roots (ms)", true, G1Log::LevelFinest, 3);
  _gc_par_phases[WaitForStrongCLD] = new WorkerDataArray<double>(max_gc_threads, "Wait For Strong CLD (ms)", true, G1Log::LevelFinest, 3);
  _gc_par_phases[WeakCLDRoots] = new WorkerDataArray<double>(max_gc_threads, "Weak CLD Roots (ms)", true, G1Log::LevelFinest, 3);
  _gc_par_phases[SATBFiltering] = new WorkerDataArray<double>(max_gc_threads, "SATB Filtering (ms)", true, G1Log::LevelFinest, 3);

  _gc_par_phases[UpdateRS] = new WorkerDataArray<double>(max_gc_threads, "Update RS (ms)", true, G1Log::LevelFiner, 2);
  _gc_par_phases[ScanRS] = new WorkerDataArray<double>(max_gc_threads, "Scan RS (ms)", true, G1Log::LevelFiner, 2);
  _gc_par_phases[CodeRoots] = new WorkerDataArray<double>(max_gc_threads, "Code Root Scanning (ms)", true, G1Log::LevelFiner, 2);
  _gc_par_phases[ObjCopy] = new WorkerDataArray<double>(max_gc_threads, "Object Copy (ms)", true, G1Log::LevelFiner, 2);
  _gc_par_phases[Termination] = new WorkerDataArray<double>(max_gc_threads, "Termination (ms)", true, G1Log::LevelFiner, 2);
  _gc_par_phases[GCWorkerTotal] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Total (ms)", true, G1Log::LevelFiner, 2);
  _gc_par_phases[GCWorkerEnd] = new WorkerDataArray<double>(max_gc_threads, "GC Worker End (ms)", false, G1Log::LevelFiner, 2);
  _gc_par_phases[Other] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Other (ms)", true, G1Log::LevelFiner, 2);

  _update_rs_processed_buffers = new WorkerDataArray<size_t>(max_gc_threads, "Processed Buffers", true, G1Log::LevelFiner, 3);
  _gc_par_phases[UpdateRS]->link_thread_work_items(_update_rs_processed_buffers);

  _termination_attempts = new WorkerDataArray<size_t>(max_gc_threads, "Termination Attempts", true, G1Log::LevelFinest, 3);
  _gc_par_phases[Termination]->link_thread_work_items(_termination_attempts);

  _gc_par_phases[StringDedupQueueFixup] = new WorkerDataArray<double>(max_gc_threads, "Queue Fixup (ms)", true, G1Log::LevelFiner, 2);
  _gc_par_phases[StringDedupTableFixup] = new WorkerDataArray<double>(max_gc_threads, "Table Fixup (ms)", true, G1Log::LevelFiner, 2);

  _gc_par_phases[RedirtyCards] = new WorkerDataArray<double>(max_gc_threads, "Parallel Redirty", true, G1Log::LevelFinest, 3);
  _redirtied_cards = new WorkerDataArray<size_t>(max_gc_threads, "Redirtied Cards", true, G1Log::LevelFinest, 3);
  _gc_par_phases[RedirtyCards]->link_thread_work_items(_redirtied_cards);
}

void G1GCPhaseTimes::note_gc_start(uint active_gc_threads, bool mark_in_progress) {
  assert(active_gc_threads > 0, "The number of threads must be > 0");
  assert(active_gc_threads <= _max_gc_threads, "The number of active threads must be <= the max number of threads");
  _active_gc_threads = active_gc_threads;

  for (int i = 0; i < GCParPhasesSentinel; i++) {
    _gc_par_phases[i]->reset();
  }

  _gc_par_phases[StringDedupQueueFixup]->set_enabled(G1StringDedup::is_enabled());
  _gc_par_phases[StringDedupTableFixup]->set_enabled(G1StringDedup::is_enabled());
}

void G1GCPhaseTimes::note_gc_end() {
  for (uint i = 0; i < _active_gc_threads; i++) {
    double worker_time = _gc_par_phases[GCWorkerEnd]->get(i) - _gc_par_phases[GCWorkerStart]->get(i);
    record_time_secs(GCWorkerTotal, i, worker_time);

    double worker_known_time =
        _gc_par_phases[ExtRootScan]->get(i) +
        _gc_par_phases[SATBFiltering]->get(i) +
        _gc_par_phases[UpdateRS]->get(i) +
        _gc_par_phases[ScanRS]->get(i) +
        _gc_par_phases[CodeRoots]->get(i) +
        _gc_par_phases[ObjCopy]->get(i) +
        _gc_par_phases[Termination]->get(i);

    record_time_secs(Other, i, worker_time - worker_known_time);
  }

  for (int i = 0; i < GCParPhasesSentinel; i++) {
    _gc_par_phases[i]->verify(_active_gc_threads);
  }
}

void G1GCPhaseTimes::print_stats(int level, const char* str, double value) {
  LineBuffer(level).append_and_print_cr("[%s: %.1lf ms]", str, value);
}

void G1GCPhaseTimes::print_stats(int level, const char* str, size_t value) {
  LineBuffer(level).append_and_print_cr("[%s: " SIZE_FORMAT "]", str, value);
}

void G1GCPhaseTimes::print_stats(int level, const char* str, double value, uint workers) {
  LineBuffer(level).append_and_print_cr("[%s: %.1lf ms, GC Workers: %u]", str, value, workers);
}

double G1GCPhaseTimes::accounted_time_ms() {
    // Root region scanning wait time. It's initialized to
    // zero at the start of the pause.
    double misc_time_ms = _root_region_scan_wait_time_ms;

    misc_time_ms += _cur_collection_par_time_ms;

    // Time taken to fix up roots in generated code
    misc_time_ms += _cur_collection_code_root_fixup_time_ms;

    // Strong code root purge time
    misc_time_ms += _cur_strong_code_root_purge_time_ms;

    if (G1StringDedup::is_enabled()) {
      // String dedup fixup time
      misc_time_ms += _cur_string_dedup_fixup_time_ms;
    }

    // Time taken to clean the card table
    misc_time_ms += _cur_clear_ct_time_ms;

    return misc_time_ms;
}
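
// For reference: print() below computes the "Other" line of the log as
//
//   double misc_time_ms = pause_time_sec * MILLIUNITS - accounted_time_ms();
//
// i.e. accounted_time_ms() is the portion of the pause that has already been
// reported under its own headings.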

// record the time a phase took in seconds
void G1GCPhaseTimes::record_time_secs(GCParPhases phase, uint worker_i, double secs) {
  _gc_par_phases[phase]->set(worker_i, secs);
}

// add a number of seconds to a phase
void G1GCPhaseTimes::add_time_secs(GCParPhases phase, uint worker_i, double secs) {
  _gc_par_phases[phase]->add(worker_i, secs);
}

void G1GCPhaseTimes::record_thread_work_item(GCParPhases phase, uint worker_i, size_t count) {
  _gc_par_phases[phase]->set_thread_work_item(worker_i, count);
}

// return the average time for a phase in milliseconds
double G1GCPhaseTimes::average_time_ms(GCParPhases phase) {
  return _gc_par_phases[phase]->average(_active_gc_threads) * 1000.0;
}

double G1GCPhaseTimes::get_time_ms(GCParPhases phase, uint worker_i) {
  return _gc_par_phases[phase]->get(worker_i) * 1000.0;
}

double G1GCPhaseTimes::sum_time_ms(GCParPhases phase) {
  return _gc_par_phases[phase]->sum(_active_gc_threads) * 1000.0;
}

double G1GCPhaseTimes::min_time_ms(GCParPhases phase) {
  return _gc_par_phases[phase]->minimum(_active_gc_threads) * 1000.0;
}

double G1GCPhaseTimes::max_time_ms(GCParPhases phase) {
  return _gc_par_phases[phase]->maximum(_active_gc_threads) * 1000.0;
}

size_t G1GCPhaseTimes::get_thread_work_item(GCParPhases phase, uint worker_i) {
  assert(_gc_par_phases[phase]->thread_work_items() != NULL, "No sub count");
  return _gc_par_phases[phase]->thread_work_items()->get(worker_i);
}

size_t G1GCPhaseTimes::sum_thread_work_items(GCParPhases phase) {
  assert(_gc_par_phases[phase]->thread_work_items() != NULL, "No sub count");
  return _gc_par_phases[phase]->thread_work_items()->sum(_active_gc_threads);
}

double G1GCPhaseTimes::average_thread_work_items(GCParPhases phase) {
  assert(_gc_par_phases[phase]->thread_work_items() != NULL, "No sub count");
  return _gc_par_phases[phase]->thread_work_items()->average(_active_gc_threads);
}

size_t G1GCPhaseTimes::min_thread_work_items(GCParPhases phase) {
  assert(_gc_par_phases[phase]->thread_work_items() != NULL, "No sub count");
  return _gc_par_phases[phase]->thread_work_items()->minimum(_active_gc_threads);
}

size_t G1GCPhaseTimes::max_thread_work_items(GCParPhases phase) {
  assert(_gc_par_phases[phase]->thread_work_items() != NULL, "No sub count");
  return _gc_par_phases[phase]->thread_work_items()->maximum(_active_gc_threads);
}

class G1GCParPhasePrinter : public StackObj {
  G1GCPhaseTimes* _phase_times;
 public:
  G1GCParPhasePrinter(G1GCPhaseTimes* phase_times) : _phase_times(phase_times) {}

  void print(G1GCPhaseTimes::GCParPhases phase_id) {
    WorkerDataArray<double>* phase = _phase_times->_gc_par_phases[phase_id];

    if (phase->_log_level > G1Log::level() || !phase->_enabled) {
      return;
    }

    if (phase->_length == 1) {
      print_single_length(phase_id, phase);
    } else {
      print_multi_length(phase_id, phase);
    }
  }

 private:

  void print_single_length(G1GCPhaseTimes::GCParPhases phase_id, WorkerDataArray<double>* phase) {
    // No need for min, max, average and sum for only one worker
    LineBuffer buf(phase->_indent_level);
    buf.append_and_print_cr("[%s:  %.1lf]", phase->_title, _phase_times->get_time_ms(phase_id, 0));

    if (phase->_thread_work_items != NULL) {
      LineBuffer buf2(phase->_thread_work_items->_indent_level);
      buf2.append_and_print_cr("[%s:  " SIZE_FORMAT "]", phase->_thread_work_items->_title, _phase_times->sum_thread_work_items(phase_id));
    }
  }

  void print_time_values(LineBuffer& buf, G1GCPhaseTimes::GCParPhases phase_id, WorkerDataArray<double>* phase) {
    uint active_length = _phase_times->_active_gc_threads;
    for (uint i = 0; i < active_length; ++i) {
      buf.append("  %.1lf", _phase_times->get_time_ms(phase_id, i));
    }
    buf.print_cr();
  }

  void print_count_values(LineBuffer& buf, G1GCPhaseTimes::GCParPhases phase_id, WorkerDataArray<size_t>* thread_work_items) {
    uint active_length = _phase_times->_active_gc_threads;
    for (uint i = 0; i < active_length; ++i) {
      buf.append("  " SIZE_FORMAT, _phase_times->get_thread_work_item(phase_id, i));
    }
    buf.print_cr();
  }

  void print_thread_work_items(G1GCPhaseTimes::GCParPhases phase_id, WorkerDataArray<size_t>* thread_work_items) {
    LineBuffer buf(thread_work_items->_indent_level);
    buf.append("[%s:", thread_work_items->_title);

    if (G1Log::finest()) {
      print_count_values(buf, phase_id, thread_work_items);
    }

    assert(thread_work_items->_print_sum, err_msg("%s does not have print sum true even though it is a count", thread_work_items->_title));

    buf.append_and_print_cr(" Min: " SIZE_FORMAT ", Avg: %.1lf, Max: " SIZE_FORMAT ", Diff: " SIZE_FORMAT ", Sum: " SIZE_FORMAT "]",
        _phase_times->min_thread_work_items(phase_id), _phase_times->average_thread_work_items(phase_id), _phase_times->max_thread_work_items(phase_id),
        _phase_times->max_thread_work_items(phase_id) - _phase_times->min_thread_work_items(phase_id), _phase_times->sum_thread_work_items(phase_id));
  }

  void print_multi_length(G1GCPhaseTimes::GCParPhases phase_id, WorkerDataArray<double>* phase) {
    LineBuffer buf(phase->_indent_level);
    buf.append("[%s:", phase->_title);

    if (G1Log::finest()) {
      print_time_values(buf, phase_id, phase);
    }

    buf.append(" Min: %.1lf, Avg: %.1lf, Max: %.1lf, Diff: %.1lf",
        _phase_times->min_time_ms(phase_id), _phase_times->average_time_ms(phase_id), _phase_times->max_time_ms(phase_id),
        _phase_times->max_time_ms(phase_id) - _phase_times->min_time_ms(phase_id));

    if (phase->_print_sum) {
      // for things like the start and end times the sum is not
      // that relevant
      buf.append(", Sum: %.1lf", _phase_times->sum_time_ms(phase_id));
    }

    buf.append_and_print_cr("]");

    if (phase->_thread_work_items != NULL) {
      print_thread_work_items(phase_id, phase->_thread_work_items);
    }
  }
};
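
// For illustration (values invented), a multi-worker phase such as ExtRootScan printed
// at the "finer" log level follows the format strings above and looks like:
//
//      [Ext Root Scanning (ms): Min: 0.2, Avg: 0.3, Max: 0.4, Diff: 0.2, Sum: 1.2]
//
// where the leading spaces come from the phase's indent level (2 * INDENT_CHARS here).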

void G1GCPhaseTimes::print(double pause_time_sec) {
  G1GCParPhasePrinter par_phase_printer(this);

  if (_root_region_scan_wait_time_ms > 0.0) {
    print_stats(1, "Root Region Scan Waiting", _root_region_scan_wait_time_ms);
  }

  print_stats(1, "Parallel Time", _cur_collection_par_time_ms, _active_gc_threads);
  for (int i = 0; i <= GCMainParPhasesLast; i++) {
    par_phase_printer.print((GCParPhases) i);
  }

  print_stats(1, "Code Root Fixup", _cur_collection_code_root_fixup_time_ms);
  print_stats(1, "Code Root Purge", _cur_strong_code_root_purge_time_ms);
  if (G1StringDedup::is_enabled()) {
    print_stats(1, "String Dedup Fixup", _cur_string_dedup_fixup_time_ms, _active_gc_threads);
    for (int i = StringDedupPhasesFirst; i <= StringDedupPhasesLast; i++) {
      par_phase_printer.print((GCParPhases) i);
    }
  }
  print_stats(1, "Clear CT", _cur_clear_ct_time_ms);
  double misc_time_ms = pause_time_sec * MILLIUNITS - accounted_time_ms();
  print_stats(1, "Other", misc_time_ms);
  if (_cur_verify_before_time_ms > 0.0) {
    print_stats(2, "Verify Before", _cur_verify_before_time_ms);
  }
  if (G1CollectedHeap::heap()->evacuation_failed()) {
    double evac_fail_handling = _cur_evac_fail_recalc_used + _cur_evac_fail_remove_self_forwards +
      _cur_evac_fail_restore_remsets;
    print_stats(2, "Evacuation Failure", evac_fail_handling);
    if (G1Log::finest()) {
      print_stats(3, "Recalculate Used", _cur_evac_fail_recalc_used);
      print_stats(3, "Remove Self Forwards", _cur_evac_fail_remove_self_forwards);
      print_stats(3, "Restore RemSet", _cur_evac_fail_restore_remsets);
    }
  }
  print_stats(2, "Choose CSet",
    (_recorded_young_cset_choice_time_ms +
    _recorded_non_young_cset_choice_time_ms));
  print_stats(2, "Ref Proc", _cur_ref_proc_time_ms);
  print_stats(2, "Ref Enq", _cur_ref_enq_time_ms);
  print_stats(2, "Redirty Cards", _recorded_redirty_logged_cards_time_ms);
  par_phase_printer.print(RedirtyCards);
  if (G1EagerReclaimHumongousObjects) {
    print_stats(2, "Humongous Register", _cur_fast_reclaim_humongous_register_time_ms);
    if (G1Log::finest()) {
      print_stats(3, "Humongous Total", _cur_fast_reclaim_humongous_total);
      print_stats(3, "Humongous Candidate", _cur_fast_reclaim_humongous_candidates);
    }
    print_stats(2, "Humongous Reclaim", _cur_fast_reclaim_humongous_time_ms);
    if (G1Log::finest()) {
      print_stats(3, "Humongous Reclaimed", _cur_fast_reclaim_humongous_reclaimed);
    }
  }
  print_stats(2, "Free CSet",
    (_recorded_young_free_cset_time_ms +
    _recorded_non_young_free_cset_time_ms));
  if (G1Log::finest()) {
    print_stats(3, "Young Free CSet", _recorded_young_free_cset_time_ms);
    print_stats(3, "Non-Young Free CSet", _recorded_non_young_free_cset_time_ms);
  }
  if (_cur_verify_after_time_ms > 0.0) {
    print_stats(2, "Verify After", _cur_verify_after_time_ms);
  }
}

G1GCParPhaseTimesTracker::G1GCParPhaseTimesTracker(G1GCPhaseTimes* phase_times, G1GCPhaseTimes::GCParPhases phase, uint worker_id) :
    _phase_times(phase_times), _phase(phase), _worker_id(worker_id) {
  if (_phase_times != NULL) {
    _start_time = os::elapsedTime();
  }
}

G1GCParPhaseTimesTracker::~G1GCParPhaseTimesTracker() {
  if (_phase_times != NULL) {
    _phase_times->record_time_secs(_phase, _worker_id, os::elapsedTime() - _start_time);
  }
}
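
// Illustrative use of the tracker (phase_times and worker_id are placeholders):
// constructed at the start of a parallel phase, it records the elapsed wall time
// for that phase and worker when it goes out of scope.
//
//   {
//     G1GCParPhaseTimesTracker t(phase_times, G1GCPhaseTimes::ObjCopy, worker_id);
//     // ... do the object copying work for this worker ...
//   } // destructor calls phase_times->record_time_secs(ObjCopy, worker_id, elapsed)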