// g1CollectorPolicy.cpp revision 9727:f944761a3ce3
/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/concurrentG1Refine.hpp"
#include "gc/g1/concurrentMark.hpp"
#include "gc/g1/concurrentMarkThread.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1IHOPControl.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "runtime/arguments.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/debug.hpp"
#include "utilities/pair.hpp"

// Different defaults for different number of GC threads
// They were chosen by running GCOld and SPECjbb on debris with different
//   numbers of GC threads and choosing them based on the results

// all the same
static double rs_length_diff_defaults[] = {
  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
};

static double cost_per_card_ms_defaults[] = {
  0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
};

// all the same
static double young_cards_per_entry_ratio_defaults[] = {
  1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
};

static double cost_per_entry_ms_defaults[] = {
  0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005
};

static double cost_per_byte_ms_defaults[] = {
  0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
};

// these should be pretty consistent
static double constant_other_time_ms_defaults[] = {
  5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
};


static double young_other_cost_per_region_ms_defaults[] = {
  0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
};

static double non_young_other_cost_per_region_ms_defaults[] = {
  1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
};

G1CollectorPolicy::G1CollectorPolicy() :
  _predictor(G1ConfidencePercent / 100.0),
  _parallel_gc_threads(ParallelGCThreads),

  _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _stop_world_start(0.0),

  _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),

  _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _prev_collection_pause_end_ms(0.0),
  _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_scan_hcc_seq(new TruncatedSeq(TruncatedSeqLength)),
  _young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  _mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
  _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _non_young_other_cost_per_region_ms_seq(
                                         new TruncatedSeq(TruncatedSeqLength)),

  _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
  _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),

  _pause_time_target_ms((double) MaxGCPauseMillis),

  _recent_prev_end_times_for_all_gcs_sec(
                                new TruncatedSeq(NumPrevPausesForHeuristics)),

  _recent_avg_pause_time_ratio(0.0),
  _rs_lengths_prediction(0),
  _max_survivor_regions(0),

  _eden_used_bytes_before_gc(0),
  _survivor_used_bytes_before_gc(0),
  _old_used_bytes_before_gc(0),
  _humongous_used_bytes_before_gc(0),
  _heap_used_bytes_before_gc(0),
  _metaspace_used_bytes_before_gc(0),
  _eden_capacity_bytes_before_gc(0),
  _heap_capacity_bytes_before_gc(0),

  _eden_cset_region_length(0),
  _survivor_cset_region_length(0),
  _old_cset_region_length(0),

  _collection_set(NULL),
  _collection_set_bytes_used_before(0),

  // Incremental CSet attributes
  _inc_cset_build_state(Inactive),
  _inc_cset_head(NULL),
  _inc_cset_tail(NULL),
  _inc_cset_bytes_used_before(0),
  _inc_cset_max_finger(NULL),
  _inc_cset_recorded_rs_lengths(0),
  _inc_cset_recorded_rs_lengths_diffs(0),
  _inc_cset_predicted_elapsed_time_ms(0.0),
  _inc_cset_predicted_elapsed_time_ms_diffs(0.0),

  // add here any more surv rate groups
  _recorded_survivor_regions(0),
  _recorded_survivor_head(NULL),
  _recorded_survivor_tail(NULL),
  _survivors_age_table(true),

  _gc_overhead_perc(0.0),

  _bytes_allocated_in_old_since_last_gc(0),
  _ihop_control(NULL),
  _initial_mark_to_mixed() {

  // SurvRateGroups below must be initialized after the predictor because they
  // indirectly use it through this object passed to their constructor.
  _short_lived_surv_rate_group =
    new SurvRateGroup(&_predictor, "Short Lived", G1YoungSurvRateNumRegionsSummary);
  _survivor_surv_rate_group =
    new SurvRateGroup(&_predictor, "Survivor", G1YoungSurvRateNumRegionsSummary);

  // Set up the region size and associated fields. Given that the
  // policy is created before the heap, we have to set this up here,
  // so it's done as soon as possible.

  // It would have been natural to pass initial_heap_byte_size() and
  // max_heap_byte_size() to setup_heap_region_size() but those have
  // not been set up at this point since they should be aligned with
  // the region size. So, there is a circular dependency here. We base
  // the region size on the heap size, but the heap size should be
  // aligned with the region size. To get around this we use the
  // unaligned values for the heap.
  HeapRegion::setup_heap_region_size(InitialHeapSize, MaxHeapSize);
  HeapRegionRemSet::setup_remset_size();

  _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
  _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;
  clear_ratio_check_data();

  _phase_times = new G1GCPhaseTimes(_parallel_gc_threads);

  int index = MIN2(_parallel_gc_threads - 1, 7);
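  // The per-thread-count default tables above each have eight entries, so the
  // index is clamped for configurations with more than eight GC threads.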

  _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
  _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
  _cost_scan_hcc_seq->add(0.0);
  _young_cards_per_entry_ratio_seq->add(
                                  young_cards_per_entry_ratio_defaults[index]);
  _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
  _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
  _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
  _young_other_cost_per_region_ms_seq->add(
                               young_other_cost_per_region_ms_defaults[index]);
  _non_young_other_cost_per_region_ms_seq->add(
                           non_young_other_cost_per_region_ms_defaults[index]);

  // Below, we might need to calculate the pause time target based on
  // the pause interval. When we do so we are going to give G1 maximum
  // flexibility and allow it to do pauses when it needs to. So, we'll
  // arrange for the pause interval to be pause time target + 1 to
  // ensure that a) the pause time target is maximized with respect to
  // the pause interval and b) we maintain the invariant that pause
  // time target < pause interval. If the user does not want this
  // maximum flexibility, they will have to set the pause interval
  // explicitly.

  // First make sure that, if either parameter is set, its value is
  // reasonable.
  if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (MaxGCPauseMillis < 1) {
      vm_exit_during_initialization("MaxGCPauseMillis should be "
                                    "greater than 0");
    }
  }
  if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    if (GCPauseIntervalMillis < 1) {
      vm_exit_during_initialization("GCPauseIntervalMillis should be "
                                    "greater than 0");
    }
  }

  // Then, if the pause time target parameter was not set, set it to
  // the default value.
  if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
      // The default pause time target in G1 is 200ms
      FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
    } else {
      // We do not allow the pause interval to be set without the
      // pause time target
      vm_exit_during_initialization("GCPauseIntervalMillis cannot be set "
                                    "without setting MaxGCPauseMillis");
    }
  }

  // Then, if the interval parameter was not set, set it according to
  // the pause time target (this will also deal with the case when the
  // pause time target is the default value).
  if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
  }

  // Finally, make sure that the two parameters are consistent.
  if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
    char buffer[256];
    jio_snprintf(buffer, 256,
                 "MaxGCPauseMillis (%u) should be less than "
                 "GCPauseIntervalMillis (%u)",
                 MaxGCPauseMillis, GCPauseIntervalMillis);
    vm_exit_during_initialization(buffer);
  }

  double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
  double time_slice  = (double) GCPauseIntervalMillis / 1000.0;
  _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);

  // start conservatively (around 50ms is about right)
  _concurrent_mark_remark_times_ms->add(0.05);
  _concurrent_mark_cleanup_times_ms->add(0.20);
  _tenuring_threshold = MaxTenuringThreshold;

  assert(GCTimeRatio > 0,
         "we should have set it to a default value in set_g1_gc_flags() "
         "if a user set it to 0");
  _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));
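  // For example, GCTimeRatio == 9 gives a GC overhead target of
  // 100 * (1 / (1 + 9)) = 10% of total time; larger ratios give
  // proportionally smaller overhead targets.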

  uintx reserve_perc = G1ReservePercent;
  // Put an artificial ceiling on this so that it's not set to a silly value.
  if (reserve_perc > 50) {
    reserve_perc = 50;
    warning("G1ReservePercent is set to a value that is too large, "
            "it's been updated to " UINTX_FORMAT, reserve_perc);
  }
  _reserve_factor = (double) reserve_perc / 100.0;
  // This will be set when the heap is expanded
  // for the first time during initialization.
  _reserve_regions = 0;

  _cset_chooser = new CollectionSetChooser();
}

G1CollectorPolicy::~G1CollectorPolicy() {
  delete _ihop_control;
}

double G1CollectorPolicy::get_new_prediction(TruncatedSeq const* seq) const {
  return _predictor.get_new_prediction(seq);
}

void G1CollectorPolicy::initialize_alignments() {
  _space_alignment = HeapRegion::GrainBytes;
  size_t card_table_alignment = CardTableRS::ct_max_alignment_constraint();
  size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
  _heap_alignment = MAX3(card_table_alignment, _space_alignment, page_size);
}

void G1CollectorPolicy::initialize_flags() {
  if (G1HeapRegionSize != HeapRegion::GrainBytes) {
    FLAG_SET_ERGO(size_t, G1HeapRegionSize, HeapRegion::GrainBytes);
  }

  if (SurvivorRatio < 1) {
    vm_exit_during_initialization("Invalid survivor ratio specified");
  }
  CollectorPolicy::initialize_flags();
  _young_gen_sizer = new G1YoungGenSizer(); // Must be after call to initialize_flags
}

void G1CollectorPolicy::post_heap_initialize() {
  uintx max_regions = G1CollectedHeap::heap()->max_regions();
  size_t max_young_size = (size_t)_young_gen_sizer->max_young_length(max_regions) * HeapRegion::GrainBytes;
  if (max_young_size != MaxNewSize) {
    FLAG_SET_ERGO(size_t, MaxNewSize, max_young_size);
  }

  _ihop_control = create_ihop_control();
}

G1CollectorState* G1CollectorPolicy::collector_state() const { return _g1->collector_state(); }

G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(true),
        _min_desired_young_length(0), _max_desired_young_length(0) {
  if (FLAG_IS_CMDLINE(NewRatio)) {
    if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
      warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
    } else {
      _sizer_kind = SizerNewRatio;
      _adaptive_size = false;
      return;
    }
  }

  if (NewSize > MaxNewSize) {
    if (FLAG_IS_CMDLINE(MaxNewSize)) {
      warning("NewSize (" SIZE_FORMAT "k) is greater than the MaxNewSize (" SIZE_FORMAT "k). "
              "A new max generation size of " SIZE_FORMAT "k will be used.",
              NewSize/K, MaxNewSize/K, NewSize/K);
    }
    MaxNewSize = NewSize;
  }

  if (FLAG_IS_CMDLINE(NewSize)) {
    _min_desired_young_length = MAX2((uint) (NewSize / HeapRegion::GrainBytes),
                                     1U);
    if (FLAG_IS_CMDLINE(MaxNewSize)) {
      _max_desired_young_length =
                             MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes),
                                  1U);
      _sizer_kind = SizerMaxAndNewSize;
      _adaptive_size = _min_desired_young_length != _max_desired_young_length;
    } else {
      _sizer_kind = SizerNewSizeOnly;
    }
  } else if (FLAG_IS_CMDLINE(MaxNewSize)) {
    _max_desired_young_length =
                             MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes),
                                  1U);
    _sizer_kind = SizerMaxNewSizeOnly;
  }
}

uint G1YoungGenSizer::calculate_default_min_length(uint new_number_of_heap_regions) {
  uint default_value = (new_number_of_heap_regions * G1NewSizePercent) / 100;
  return MAX2(1U, default_value);
}

uint G1YoungGenSizer::calculate_default_max_length(uint new_number_of_heap_regions) {
  uint default_value = (new_number_of_heap_regions * G1MaxNewSizePercent) / 100;
  return MAX2(1U, default_value);
}

void G1YoungGenSizer::recalculate_min_max_young_length(uint number_of_heap_regions, uint* min_young_length, uint* max_young_length) {
  assert(number_of_heap_regions > 0, "Heap must be initialized");

  switch (_sizer_kind) {
    case SizerDefaults:
      *min_young_length = calculate_default_min_length(number_of_heap_regions);
      *max_young_length = calculate_default_max_length(number_of_heap_regions);
      break;
    case SizerNewSizeOnly:
      *max_young_length = calculate_default_max_length(number_of_heap_regions);
      *max_young_length = MAX2(*min_young_length, *max_young_length);
      break;
    case SizerMaxNewSizeOnly:
      *min_young_length = calculate_default_min_length(number_of_heap_regions);
      *min_young_length = MIN2(*min_young_length, *max_young_length);
      break;
    case SizerMaxAndNewSize:
      // Do nothing. Values set on the command line, don't update them at runtime.
      break;
    case SizerNewRatio:
      *min_young_length = number_of_heap_regions / (NewRatio + 1);
      *max_young_length = *min_young_length;
      break;
    default:
      ShouldNotReachHere();
  }

  assert(*min_young_length <= *max_young_length, "Invalid min/max young gen size values");
}

uint G1YoungGenSizer::max_young_length(uint number_of_heap_regions) {
  // We need to pass the desired values because recalculation may not update these
  // values in some cases.
  uint temp = _min_desired_young_length;
  uint result = _max_desired_young_length;
  recalculate_min_max_young_length(number_of_heap_regions, &temp, &result);
  return result;
}

void G1YoungGenSizer::heap_size_changed(uint new_number_of_heap_regions) {
  recalculate_min_max_young_length(new_number_of_heap_regions, &_min_desired_young_length,
          &_max_desired_young_length);
}

void G1CollectorPolicy::init() {
  // Set aside an initial future to_space.
  _g1 = G1CollectedHeap::heap();

  assert(Heap_lock->owned_by_self(), "Locking discipline.");

  initialize_gc_policy_counters();

  if (adaptive_young_list_length()) {
    _young_list_fixed_length = 0;
  } else {
    _young_list_fixed_length = _young_gen_sizer->min_desired_young_length();
  }
  _free_regions_at_end_of_collection = _g1->num_free_regions();

  update_young_list_max_and_target_length();
  // We may immediately start allocating regions and placing them on the
  // collection set list. Initialize the per-collection set info
  start_incremental_cset_building();
}

void G1CollectorPolicy::note_gc_start(uint num_active_workers) {
  phase_times()->note_gc_start(num_active_workers);
}

// Create the jstat counters for the policy.
void G1CollectorPolicy::initialize_gc_policy_counters() {
  _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3);
}

bool G1CollectorPolicy::predict_will_fit(uint young_length,
                                         double base_time_ms,
                                         uint base_free_regions,
                                         double target_pause_time_ms) const {
  if (young_length >= base_free_regions) {
    // end condition 1: not enough space for the young regions
    return false;
  }

  double accum_surv_rate = accum_yg_surv_rate_pred((int) young_length - 1);
  size_t bytes_to_copy =
               (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
  double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
  double young_other_time_ms = predict_young_other_time_ms(young_length);
  double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms;
  if (pause_time_ms > target_pause_time_ms) {
    // end condition 2: prediction is over the target pause time
    return false;
  }

  size_t free_bytes = (base_free_regions - young_length) * HeapRegion::GrainBytes;

  // When copying, we will likely need more bytes free than is live in the region.
  // Add some safety margin to factor in the confidence of our guess, and the
  // natural expected waste.
  // (100.0 / G1ConfidencePercent) is a scale factor that expresses the uncertainty
  // of the calculation: the lower the confidence, the more headroom.
  // (100 + TargetPLABWastePct) represents the increase in expected bytes during
  // copying due to anticipated waste in the PLABs.
  double safety_factor = (100.0 / G1ConfidencePercent) * (100 + TargetPLABWastePct) / 100.0;
  size_t expected_bytes_to_copy = (size_t) (safety_factor * (double) bytes_to_copy);
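  // For example, assuming the usual defaults of G1ConfidencePercent == 50 and
  // TargetPLABWastePct == 10, safety_factor == (100.0 / 50) * 110 / 100 == 2.2,
  // i.e. we require a bit more than twice the predicted copy volume to be free.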

  if (expected_bytes_to_copy > free_bytes) {
    // end condition 3: out-of-space
    return false;
  }

  // success!
  return true;
}

void G1CollectorPolicy::record_new_heap_size(uint new_number_of_regions) {
  // re-calculate the necessary reserve
  double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
  // We use ceiling so that if reserve_regions_d is > 0.0 (but
  // smaller than 1.0) we'll get 1.
  _reserve_regions = (uint) ceil(reserve_regions_d);

  _young_gen_sizer->heap_size_changed(new_number_of_regions);
}

uint G1CollectorPolicy::calculate_young_list_desired_min_length(
                                                       uint base_min_length) const {
  uint desired_min_length = 0;
  if (adaptive_young_list_length()) {
    if (_alloc_rate_ms_seq->num() > 3) {
      double now_sec = os::elapsedTime();
      double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
      double alloc_rate_ms = predict_alloc_rate_ms();
      desired_min_length = (uint) ceil(alloc_rate_ms * when_ms);
    } else {
      // otherwise we don't have enough info to make the prediction
    }
  }
  desired_min_length += base_min_length;
  // make sure we don't go below any user-defined minimum bound
  return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length);
}

uint G1CollectorPolicy::calculate_young_list_desired_max_length() const {
  // Here, we might want to also take into account any additional
  // constraints (i.e., user-defined minimum bound). Currently, we
  // effectively don't set this bound.
  return _young_gen_sizer->max_desired_young_length();
}

uint G1CollectorPolicy::update_young_list_max_and_target_length() {
  return update_young_list_max_and_target_length(get_new_prediction(_rs_lengths_seq));
}

uint G1CollectorPolicy::update_young_list_max_and_target_length(size_t rs_lengths) {
  uint unbounded_target_length = update_young_list_target_length(rs_lengths);
  update_max_gc_locker_expansion();
  return unbounded_target_length;
}

uint G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
  YoungTargetLengths young_lengths = young_list_target_lengths(rs_lengths);
  _young_list_target_length = young_lengths.first;
  return young_lengths.second;
}

G1CollectorPolicy::YoungTargetLengths G1CollectorPolicy::young_list_target_lengths(size_t rs_lengths) const {
  YoungTargetLengths result;

  // Calculate the absolute and desired min bounds first.

  // This is how many young regions we already have (currently: the survivors).
  uint base_min_length = recorded_survivor_regions();
  uint desired_min_length = calculate_young_list_desired_min_length(base_min_length);
  // This is the absolute minimum young length. Ensure that we
  // will at least have one eden region available for allocation.
  uint absolute_min_length = base_min_length + MAX2(_g1->young_list()->eden_length(), (uint)1);
  // If we shrank the young list target it should not shrink below the current size.
  desired_min_length = MAX2(desired_min_length, absolute_min_length);
  // Calculate the absolute and desired max bounds.

  uint desired_max_length = calculate_young_list_desired_max_length();

  uint young_list_target_length = 0;
  if (adaptive_young_list_length()) {
    if (collector_state()->gcs_are_young()) {
      young_list_target_length =
                        calculate_young_list_target_length(rs_lengths,
                                                           base_min_length,
                                                           desired_min_length,
                                                           desired_max_length);
    } else {
      // Don't calculate anything and let the code below bound it to
      // the desired_min_length, i.e., do the next GC as soon as
      // possible to maximize how many old regions we can add to it.
    }
  } else {
    // The user asked for a fixed young gen so we'll fix the young gen
    // whether the next GC is young or mixed.
    young_list_target_length = _young_list_fixed_length;
  }

  result.second = young_list_target_length;

  // We will try our best not to "eat" into the reserve.
  uint absolute_max_length = 0;
  if (_free_regions_at_end_of_collection > _reserve_regions) {
    absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
  }
  if (desired_max_length > absolute_max_length) {
    desired_max_length = absolute_max_length;
  }

  // Make sure we don't go over the desired max length, nor under the
  // desired min length. In case they clash, desired_min_length wins
  // which is why that test is second.
  if (young_list_target_length > desired_max_length) {
    young_list_target_length = desired_max_length;
  }
  if (young_list_target_length < desired_min_length) {
    young_list_target_length = desired_min_length;
  }

  assert(young_list_target_length > recorded_survivor_regions(),
         "we should be able to allocate at least one eden region");
  assert(young_list_target_length >= absolute_min_length, "post-condition");

  result.first = young_list_target_length;
  return result;
}

uint
G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
                                                     uint base_min_length,
                                                     uint desired_min_length,
                                                     uint desired_max_length) const {
  assert(adaptive_young_list_length(), "pre-condition");
  assert(collector_state()->gcs_are_young(), "only call this for young GCs");

  // In case some edge-condition makes the desired max length too small...
  if (desired_max_length <= desired_min_length) {
    return desired_min_length;
  }

  // We'll adjust min_young_length and max_young_length not to include
  // the already allocated young regions (i.e., so they reflect the
  // min and max eden regions we'll allocate). The base_min_length
  // will be reflected in the predictions by the
  // survivor_regions_evac_time prediction.
  assert(desired_min_length > base_min_length, "invariant");
  uint min_young_length = desired_min_length - base_min_length;
  assert(desired_max_length > base_min_length, "invariant");
  uint max_young_length = desired_max_length - base_min_length;

  double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
  double survivor_regions_evac_time = predict_survivor_regions_evac_time();
  size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq);
  size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
  size_t scanned_cards = predict_young_card_num(adj_rs_lengths);
  double base_time_ms =
    predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
    survivor_regions_evac_time;
  uint available_free_regions = _free_regions_at_end_of_collection;
  uint base_free_regions = 0;
  if (available_free_regions > _reserve_regions) {
    base_free_regions = available_free_regions - _reserve_regions;
  }

  // Here, we will make sure that the shortest young length that
  // makes sense fits within the target pause time.

  if (predict_will_fit(min_young_length, base_time_ms,
                       base_free_regions, target_pause_time_ms)) {
    // The shortest young length will fit into the target pause time;
    // we'll now check whether the absolute maximum number of young
    // regions will fit in the target pause time. If not, we'll do
    // a binary search between min_young_length and max_young_length.
    if (predict_will_fit(max_young_length, base_time_ms,
                         base_free_regions, target_pause_time_ms)) {
      // The maximum young length will fit into the target pause time.
      // We are done so set min young length to the maximum length (as
      // the result is assumed to be returned in min_young_length).
      min_young_length = max_young_length;
    } else {
      // The maximum possible number of young regions will not fit within
      // the target pause time so we'll search for the optimal
      // length. The loop invariants are:
      //
      // min_young_length < max_young_length
      // min_young_length is known to fit into the target pause time
      // max_young_length is known not to fit into the target pause time
      //
      // Going into the loop we know the above hold as we've just
      // checked them. Every time around the loop we check whether
      // the middle value between min_young_length and
      // max_young_length fits into the target pause time. If it
      // does, it becomes the new min. If it doesn't, it becomes
      // the new max. This way we maintain the loop invariants.

      assert(min_young_length < max_young_length, "invariant");
      uint diff = (max_young_length - min_young_length) / 2;
      while (diff > 0) {
        uint young_length = min_young_length + diff;
        if (predict_will_fit(young_length, base_time_ms,
                             base_free_regions, target_pause_time_ms)) {
          min_young_length = young_length;
        } else {
          max_young_length = young_length;
        }
        assert(min_young_length <  max_young_length, "invariant");
        diff = (max_young_length - min_young_length) / 2;
      }
      // The result is min_young_length which, according to the
      // loop invariants, should fit within the target pause time.

      // These are the post-conditions of the binary search above:
      assert(min_young_length < max_young_length,
             "otherwise we should have discovered that max_young_length "
             "fits into the pause target and not done the binary search");
      assert(predict_will_fit(min_young_length, base_time_ms,
                              base_free_regions, target_pause_time_ms),
             "min_young_length, the result of the binary search, should "
             "fit into the pause target");
      assert(!predict_will_fit(min_young_length + 1, base_time_ms,
                               base_free_regions, target_pause_time_ms),
             "min_young_length, the result of the binary search, should be "
             "optimal, so no larger length should fit into the pause target");
    }
  } else {
    // Even the minimum length doesn't fit into the pause time
    // target, return it as the result nevertheless.
  }
  return base_min_length + min_young_length;
}

double G1CollectorPolicy::predict_survivor_regions_evac_time() const {
  double survivor_regions_evac_time = 0.0;
  for (HeapRegion * r = _recorded_survivor_head;
       r != NULL && r != _recorded_survivor_tail->get_next_young_region();
       r = r->get_next_young_region()) {
    survivor_regions_evac_time += predict_region_elapsed_time_ms(r, collector_state()->gcs_are_young());
  }
  return survivor_regions_evac_time;
}

void G1CollectorPolicy::revise_young_list_target_length_if_necessary() {
  guarantee( adaptive_young_list_length(), "should not call this otherwise" );

  size_t rs_lengths = _g1->young_list()->sampled_rs_lengths();
  if (rs_lengths > _rs_lengths_prediction) {
    // add 10% to avoid having to recalculate often
    size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
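    // (scaling by 1100 / 1000 adds the 10% margin using integer arithmetic)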
    update_rs_lengths_prediction(rs_lengths_prediction);

    update_young_list_max_and_target_length(rs_lengths_prediction);
  }
}

void G1CollectorPolicy::update_rs_lengths_prediction() {
  update_rs_lengths_prediction(get_new_prediction(_rs_lengths_seq));
}

void G1CollectorPolicy::update_rs_lengths_prediction(size_t prediction) {
  if (collector_state()->gcs_are_young() && adaptive_young_list_length()) {
    _rs_lengths_prediction = prediction;
  }
}

HeapWord* G1CollectorPolicy::mem_allocate_work(size_t size,
                                               bool is_tlab,
                                               bool* gc_overhead_limit_was_exceeded) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}

// This method controls how a collector handles one or more
// of its generations being fully allocated.
HeapWord* G1CollectorPolicy::satisfy_failed_allocation(size_t size,
                                                       bool is_tlab) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}


#ifndef PRODUCT
bool G1CollectorPolicy::verify_young_ages() {
  HeapRegion* head = _g1->young_list()->first_region();
  return
    verify_young_ages(head, _short_lived_surv_rate_group);
  // also call verify_young_ages on any additional surv rate groups
}

bool
G1CollectorPolicy::verify_young_ages(HeapRegion* head,
                                     SurvRateGroup *surv_rate_group) {
  guarantee( surv_rate_group != NULL, "pre-condition" );

  const char* name = surv_rate_group->name();
  bool ret = true;
  int prev_age = -1;

  for (HeapRegion* curr = head;
       curr != NULL;
       curr = curr->get_next_young_region()) {
    SurvRateGroup* group = curr->surv_rate_group();
    if (group == NULL && !curr->is_survivor()) {
      log_info(gc, verify)("## %s: encountered NULL surv_rate_group", name);
      ret = false;
    }

    if (surv_rate_group == group) {
      int age = curr->age_in_surv_rate_group();

      if (age < 0) {
        log_info(gc, verify)("## %s: encountered negative age", name);
        ret = false;
      }

      if (age <= prev_age) {
        log_info(gc, verify)("## %s: region ages are not strictly increasing (%d, %d)", name, age, prev_age);
        ret = false;
      }
      prev_age = age;
    }
  }

  return ret;
}
#endif // PRODUCT

void G1CollectorPolicy::record_full_collection_start() {
  _full_collection_start_sec = os::elapsedTime();
  record_heap_size_info_at_start(true /* full */);
  // Release the future to-space so that it is available for compaction into.
  collector_state()->set_full_collection(true);
}

void G1CollectorPolicy::record_full_collection_end() {
  // Consider this like a collection pause for the purposes of allocation
  // since last pause.
  double end_sec = os::elapsedTime();
  double full_gc_time_sec = end_sec - _full_collection_start_sec;
  double full_gc_time_ms = full_gc_time_sec * 1000.0;

  _trace_old_gen_time_data.record_full_collection(full_gc_time_ms);

  update_recent_gc_times(end_sec, full_gc_time_ms);

  collector_state()->set_full_collection(false);

  // "Nuke" the heuristics that control the young/mixed GC
  // transitions and make sure we start with young GCs after the Full GC.
  collector_state()->set_gcs_are_young(true);
  collector_state()->set_last_young_gc(false);
  collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0));
  collector_state()->set_during_initial_mark_pause(false);
  collector_state()->set_in_marking_window(false);
  collector_state()->set_in_marking_window_im(false);

  _short_lived_surv_rate_group->start_adding_regions();
  // also call this on any additional surv rate groups

  record_survivor_regions(0, NULL, NULL);

  _free_regions_at_end_of_collection = _g1->num_free_regions();
  // Reset survivors SurvRateGroup.
  _survivor_surv_rate_group->reset();
  update_young_list_max_and_target_length();
  update_rs_lengths_prediction();
  cset_chooser()->clear();

  _bytes_allocated_in_old_since_last_gc = 0;

  record_pause(FullGC, _full_collection_start_sec, end_sec);
}

void G1CollectorPolicy::record_stop_world_start() {
  _stop_world_start = os::elapsedTime();
}

void G1CollectorPolicy::record_collection_pause_start(double start_time_sec) {
  // We only need to do this here as the policy will only be applied
  // to the GC we're about to start, so there is no point in calculating this
  // every time we calculate / recalculate the target young length.
  update_survivors_policy();

  assert(_g1->used() == _g1->recalculate_used(),
         "sanity, used: " SIZE_FORMAT " recalculate_used: " SIZE_FORMAT,
         _g1->used(), _g1->recalculate_used());

  double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
  _trace_young_gen_time_data.record_start_collection(s_w_t_ms);
  _stop_world_start = 0.0;

  record_heap_size_info_at_start(false /* full */);

  phase_times()->record_cur_collection_start_sec(start_time_sec);
  _pending_cards = _g1->pending_card_num();

  _collection_set_bytes_used_before = 0;
  _bytes_copied_during_gc = 0;

  collector_state()->set_last_gc_was_young(false);

  // do that for any other surv rate groups
  _short_lived_surv_rate_group->stop_adding_regions();
  _survivors_age_table.clear();

  assert( verify_young_ages(), "region age verification" );
}

void G1CollectorPolicy::record_concurrent_mark_init_end(double
                                                   mark_init_elapsed_time_ms) {
  collector_state()->set_during_marking(true);
  assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
  collector_state()->set_during_initial_mark_pause(false);
}

void G1CollectorPolicy::record_concurrent_mark_remark_start() {
  _mark_remark_start_sec = os::elapsedTime();
  collector_state()->set_during_marking(false);
}

void G1CollectorPolicy::record_concurrent_mark_remark_end() {
  double end_time_sec = os::elapsedTime();
  double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
  _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
  _prev_collection_pause_end_ms += elapsed_time_ms;

  record_pause(Remark, _mark_remark_start_sec, end_time_sec);
}

void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
  _mark_cleanup_start_sec = os::elapsedTime();
}

void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
  bool should_continue_with_reclaim = next_gc_should_be_mixed("request last young-only gc",
                                                              "skip last young-only gc");
  collector_state()->set_last_young_gc(should_continue_with_reclaim);
  // We skip the marking phase.
  if (!should_continue_with_reclaim) {
    abort_time_to_mixed_tracking();
  }
  collector_state()->set_in_marking_window(false);
}

void G1CollectorPolicy::record_concurrent_pause() {
  if (_stop_world_start > 0.0) {
    double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
    _trace_young_gen_time_data.record_yield_time(yield_ms);
  }
}

double G1CollectorPolicy::average_time_ms(G1GCPhaseTimes::GCParPhases phase) const {
  return phase_times()->average_time_ms(phase);
}

double G1CollectorPolicy::young_other_time_ms() const {
  return phase_times()->young_cset_choice_time_ms() +
         phase_times()->young_free_cset_time_ms();
}

double G1CollectorPolicy::non_young_other_time_ms() const {
  return phase_times()->non_young_cset_choice_time_ms() +
         phase_times()->non_young_free_cset_time_ms();
}

double G1CollectorPolicy::other_time_ms(double pause_time_ms) const {
  return pause_time_ms -
         average_time_ms(G1GCPhaseTimes::UpdateRS) -
         average_time_ms(G1GCPhaseTimes::ScanRS) -
         average_time_ms(G1GCPhaseTimes::ObjCopy) -
         average_time_ms(G1GCPhaseTimes::Termination);
}

double G1CollectorPolicy::constant_other_time_ms(double pause_time_ms) const {
  return other_time_ms(pause_time_ms) - young_other_time_ms() - non_young_other_time_ms();
}

bool G1CollectorPolicy::about_to_start_mixed_phase() const {
  return _g1->concurrent_mark()->cmThread()->during_cycle() || collector_state()->last_young_gc();
}

bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
  if (about_to_start_mixed_phase()) {
    return false;
  }

  size_t marking_initiating_used_threshold = _ihop_control->get_conc_mark_start_threshold();

  size_t cur_used_bytes = _g1->non_young_capacity_bytes();
  size_t alloc_byte_size = alloc_word_size * HeapWordSize;
  size_t marking_request_bytes = cur_used_bytes + alloc_byte_size;

  bool result = false;
  if (marking_request_bytes > marking_initiating_used_threshold) {
    result = collector_state()->gcs_are_young() && !collector_state()->last_young_gc();
    log_debug(gc, ergo, ihop)("%s occupancy: " SIZE_FORMAT "B allocation request: " SIZE_FORMAT "B threshold: " SIZE_FORMAT "B (%1.2f) source: %s",
                              result ? "Request concurrent cycle initiation (occupancy higher than threshold)" : "Do not request concurrent cycle initiation (still doing mixed collections)",
                              cur_used_bytes, alloc_byte_size, marking_initiating_used_threshold, (double) marking_initiating_used_threshold / _g1->capacity() * 100, source);
  }

  return result;
}

// Anything below that is considered to be zero
#define MIN_TIMER_GRANULARITY 0.0000001

void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t cards_scanned) {
  double end_time_sec = os::elapsedTime();

  size_t cur_used_bytes = _g1->used();
  assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
  bool last_pause_included_initial_mark = false;
  bool update_stats = !_g1->evacuation_failed();

  NOT_PRODUCT(_short_lived_surv_rate_group->print());

  record_pause(young_gc_pause_kind(), end_time_sec - pause_time_ms / 1000.0, end_time_sec);

  last_pause_included_initial_mark = collector_state()->during_initial_mark_pause();
  if (last_pause_included_initial_mark) {
    record_concurrent_mark_init_end(0.0);
  } else {
    maybe_start_marking();
  }

  double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms);
  if (app_time_ms < MIN_TIMER_GRANULARITY) {
    // This usually happens due to the timer not having the required
    // granularity. Some Linuxes are the usual culprits.
    // We'll just set it to something (arbitrarily) small.
    app_time_ms = 1.0;
  }

  if (update_stats) {
    _trace_young_gen_time_data.record_end_collection(pause_time_ms, phase_times());
    // We maintain the invariant that all objects allocated by mutator
    // threads will be allocated out of eden regions. So, we can use
    // the eden region number allocated since the previous GC to
    // calculate the application's allocate rate. The only exception
    // to that is humongous objects that are allocated separately. But
    // given that humongous object allocations do not really affect
    // either the pause's duration or when the next pause will take
    // place we can safely ignore them here.
    uint regions_allocated = eden_cset_region_length();
    double alloc_rate_ms = (double) regions_allocated / app_time_ms;
    _alloc_rate_ms_seq->add(alloc_rate_ms);

    double interval_ms =
      (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
    update_recent_gc_times(end_time_sec, pause_time_ms);
    _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms;
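    // I.e. the fraction of recent wall-clock time that has been spent in
    // stop-the-world GC pauses.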
    if (recent_avg_pause_time_ratio() < 0.0 ||
        (recent_avg_pause_time_ratio() - 1.0 > 0.0)) {
      // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
      // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
      if (_recent_avg_pause_time_ratio < 0.0) {
        _recent_avg_pause_time_ratio = 0.0;
      } else {
        assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
        _recent_avg_pause_time_ratio = 1.0;
      }
    }

    // Compute the ratio of just this last pause time to the entire time range stored
    // in the vectors. Comparing this pause to the entire range, rather than only the
    // most recent interval, has the effect of smoothing over a possible transient 'burst'
    // of more frequent pauses that don't really reflect a change in heap occupancy.
    // This reduces the likelihood of a needless heap expansion being triggered.
    _last_pause_time_ratio =
      (pause_time_ms * _recent_prev_end_times_for_all_gcs_sec->num()) / interval_ms;
  }

  bool new_in_marking_window = collector_state()->in_marking_window();
  bool new_in_marking_window_im = false;
  if (last_pause_included_initial_mark) {
    new_in_marking_window = true;
    new_in_marking_window_im = true;
  }

  if (collector_state()->last_young_gc()) {
    // This is supposed to be the "last young GC" before we start
    // doing mixed GCs. Here we decide whether to start mixed GCs or not.
    assert(!last_pause_included_initial_mark, "The last young GC is not allowed to be an initial mark GC");

    if (next_gc_should_be_mixed("start mixed GCs",
                                "do not start mixed GCs")) {
      collector_state()->set_gcs_are_young(false);
    } else {
      // We aborted the mixed GC phase early.
      abort_time_to_mixed_tracking();
    }

    collector_state()->set_last_young_gc(false);
  }

  if (!collector_state()->last_gc_was_young()) {
    // This is a mixed GC. Here we decide whether to continue doing
    // mixed GCs or not.
    if (!next_gc_should_be_mixed("continue mixed GCs",
                                 "do not continue mixed GCs")) {
      collector_state()->set_gcs_are_young(true);

      maybe_start_marking();
    }
  }

  _short_lived_surv_rate_group->start_adding_regions();
  // Do that for any other surv rate groups

  if (update_stats) {
    double cost_per_card_ms = 0.0;
    double cost_scan_hcc = average_time_ms(G1GCPhaseTimes::ScanHCC);
    if (_pending_cards > 0) {
      cost_per_card_ms = (average_time_ms(G1GCPhaseTimes::UpdateRS) - cost_scan_hcc) / (double) _pending_cards;
      _cost_per_card_ms_seq->add(cost_per_card_ms);
    }
    _cost_scan_hcc_seq->add(cost_scan_hcc);

    double cost_per_entry_ms = 0.0;
    if (cards_scanned > 10) {
      cost_per_entry_ms = average_time_ms(G1GCPhaseTimes::ScanRS) / (double) cards_scanned;
      if (collector_state()->last_gc_was_young()) {
        _cost_per_entry_ms_seq->add(cost_per_entry_ms);
      } else {
        _mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms);
      }
    }

    if (_max_rs_lengths > 0) {
      double cards_per_entry_ratio =
        (double) cards_scanned / (double) _max_rs_lengths;
      if (collector_state()->last_gc_was_young()) {
        _young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
      } else {
        _mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
      }
    }

    // This is defensive. For a while _max_rs_lengths could get
    // smaller than _recorded_rs_lengths which was causing
    // rs_length_diff to get very large and mess up the RSet length
    // predictions. The reason was unsafe concurrent updates to the
    // _inc_cset_recorded_rs_lengths field which the code below guards
    // against (see CR 7118202). This bug has now been fixed (see CR
    // 7119027). However, I'm still worried that
    // _inc_cset_recorded_rs_lengths might still end up somewhat
    // inaccurate. The concurrent refinement thread calculates an
    // RSet's length concurrently with other CR threads updating it
    // which might cause it to calculate the length incorrectly (if,
    // say, it's in mid-coarsening). So I'll leave in the defensive
    // conditional below just in case.
    size_t rs_length_diff = 0;
    if (_max_rs_lengths > _recorded_rs_lengths) {
      rs_length_diff = _max_rs_lengths - _recorded_rs_lengths;
    }
    _rs_length_diff_seq->add((double) rs_length_diff);

    size_t freed_bytes = _heap_used_bytes_before_gc - cur_used_bytes;
    size_t copied_bytes = _collection_set_bytes_used_before - freed_bytes;
    double cost_per_byte_ms = 0.0;

    if (copied_bytes > 0) {
      cost_per_byte_ms = average_time_ms(G1GCPhaseTimes::ObjCopy) / (double) copied_bytes;
      if (collector_state()->in_marking_window()) {
        _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
      } else {
        _cost_per_byte_ms_seq->add(cost_per_byte_ms);
      }
    }

    if (young_cset_region_length() > 0) {
      _young_other_cost_per_region_ms_seq->add(young_other_time_ms() /
                                               young_cset_region_length());
    }

    if (old_cset_region_length() > 0) {
      _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms() /
                                                   old_cset_region_length());
    }

    _constant_other_time_ms_seq->add(constant_other_time_ms(pause_time_ms));

    _pending_cards_seq->add((double) _pending_cards);
    _rs_lengths_seq->add((double) _max_rs_lengths);
  }

  collector_state()->set_in_marking_window(new_in_marking_window);
  collector_state()->set_in_marking_window_im(new_in_marking_window_im);
  _free_regions_at_end_of_collection = _g1->num_free_regions();
  // IHOP control wants to know the expected young gen length if it were not
  // restrained by the heap reserve. Using the actual length would make the
  // prediction too small and then limit the young gen every time we get to the
  // predicted target occupancy.
  size_t last_unrestrained_young_length = update_young_list_max_and_target_length();
  update_rs_lengths_prediction();

  update_ihop_prediction(app_time_ms / 1000.0,
                         _bytes_allocated_in_old_since_last_gc,
                         last_unrestrained_young_length * HeapRegion::GrainBytes);
  _bytes_allocated_in_old_since_last_gc = 0;

  _ihop_control->send_trace_event(_g1->gc_tracer_stw());

  // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
  double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
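  // For example, a 200 ms pause time goal combined with the (assumed default)
  // G1RSetUpdatingPauseTimePercent of 10 gives a 20 ms Update RS budget.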
1185
1186  double scan_hcc_time_ms = average_time_ms(G1GCPhaseTimes::ScanHCC);
1187
1188  if (update_rs_time_goal_ms < scan_hcc_time_ms) {
1189    log_debug(gc, ergo, refine)("Adjust concurrent refinement thresholds (scanning the HCC expected to take longer than Update RS time goal)."
1190                                "Update RS time goal: %1.2fms Scan HCC time: %1.2fms",
1191                                update_rs_time_goal_ms, scan_hcc_time_ms);
1192
1193    update_rs_time_goal_ms = 0;
1194  } else {
1195    update_rs_time_goal_ms -= scan_hcc_time_ms;
1196  }
1197  adjust_concurrent_refinement(average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms,
1198                               phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS),
1199                               update_rs_time_goal_ms);
1200
1201  cset_chooser()->verify();
1202}
1203
1204G1IHOPControl* G1CollectorPolicy::create_ihop_control() const {
1205  if (G1UseAdaptiveIHOP) {
1206    return new G1AdaptiveIHOPControl(InitiatingHeapOccupancyPercent,
1207                                     G1CollectedHeap::heap()->max_capacity(),
1208                                     &_predictor,
1209                                     G1ReservePercent,
1210                                     G1HeapWastePercent);
1211  } else {
1212    return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent,
1213                                   G1CollectedHeap::heap()->max_capacity());
1214  }
1215}
1216
1217void G1CollectorPolicy::update_ihop_prediction(double mutator_time_s,
1218                                               size_t mutator_alloc_bytes,
1219                                               size_t young_gen_size) {
1220  // Always try to update IHOP prediction. Even evacuation failures give information
1221  // about e.g. whether to start IHOP earlier next time.
1222
1223  // Avoid using really small application times that might create samples with
1224  // very high or very low values. They may be caused by e.g. back-to-back gcs.
1225  double const min_valid_time = 1e-6;
1226
1227  bool report = false;
1228
1229  double marking_to_mixed_time = -1.0;
1230  if (!collector_state()->last_gc_was_young() && _initial_mark_to_mixed.has_result()) {
1231    marking_to_mixed_time = _initial_mark_to_mixed.last_marking_time();
1232    assert(marking_to_mixed_time > 0.0,
1233           "Initial mark to mixed time must be larger than zero but is %.3f",
1234           marking_to_mixed_time);
1235    if (marking_to_mixed_time > min_valid_time) {
1236      _ihop_control->update_marking_length(marking_to_mixed_time);
1237      report = true;
1238    }
1239  }
1240
1241  // As an approximation for the young gc promotion rates during marking we use
1242  // all of them. In many applications there are only a few if any young gcs during
1243  // marking, which makes any prediction useless. This increases the accuracy of the
1244  // prediction.
1245  if (collector_state()->last_gc_was_young() && mutator_time_s > min_valid_time) {
1246    _ihop_control->update_allocation_info(mutator_time_s, mutator_alloc_bytes, young_gen_size);
1247    report = true;
1248  }
1249
1250  if (report) {
1251    report_ihop_statistics();
1252  }
1253}
1254
1255void G1CollectorPolicy::report_ihop_statistics() {
1256  _ihop_control->print();
1257}
1258
1259#define EXT_SIZE_FORMAT "%.1f%s"
1260#define EXT_SIZE_PARAMS(bytes)                                  \
1261  byte_size_in_proper_unit((double)(bytes)),                    \
1262  proper_unit_for_byte_size((bytes))
1263
1264void G1CollectorPolicy::record_heap_size_info_at_start(bool full) {
1265  YoungList* young_list = _g1->young_list();
1266  _eden_used_bytes_before_gc = young_list->eden_used_bytes();
1267  _survivor_used_bytes_before_gc = young_list->survivor_used_bytes();
1268  _heap_capacity_bytes_before_gc = _g1->capacity();
1269  _old_used_bytes_before_gc = _g1->old_regions_count() * HeapRegion::GrainBytes;
1270  _humongous_used_bytes_before_gc = _g1->humongous_regions_count() * HeapRegion::GrainBytes;
1271  _heap_used_bytes_before_gc = _g1->used();
1272  _eden_capacity_bytes_before_gc = (_young_list_target_length * HeapRegion::GrainBytes) - _survivor_used_bytes_before_gc;
1273  _metaspace_used_bytes_before_gc = MetaspaceAux::used_bytes();
1274}
1275
1276void G1CollectorPolicy::print_detailed_heap_transition() const {
1277  YoungList* young_list = _g1->young_list();
1278
1279  size_t eden_used_bytes_after_gc = young_list->eden_used_bytes();
1280  size_t survivor_used_bytes_after_gc = young_list->survivor_used_bytes();
1281  size_t heap_used_bytes_after_gc = _g1->used();
1282  size_t old_used_bytes_after_gc = _g1->old_regions_count() * HeapRegion::GrainBytes;
1283  size_t humongous_used_bytes_after_gc = _g1->humongous_regions_count() * HeapRegion::GrainBytes;
1284
1285  size_t heap_capacity_bytes_after_gc = _g1->capacity();
1286  size_t eden_capacity_bytes_after_gc =
1287    (_young_list_target_length * HeapRegion::GrainBytes) - survivor_used_bytes_after_gc;
1288  size_t survivor_capacity_bytes_after_gc = _max_survivor_regions * HeapRegion::GrainBytes;
1289
1290  log_info(gc, heap)("Eden: " SIZE_FORMAT "K->" SIZE_FORMAT "K(" SIZE_FORMAT "K)",
1291                     _eden_used_bytes_before_gc / K, eden_used_bytes_after_gc / K, eden_capacity_bytes_after_gc / K);
1292  log_info(gc, heap)("Survivor: " SIZE_FORMAT "K->" SIZE_FORMAT "K(" SIZE_FORMAT "K)",
1293                     _survivor_used_bytes_before_gc / K, survivor_used_bytes_after_gc / K, survivor_capacity_bytes_after_gc / K);
1294  log_info(gc, heap)("Old: " SIZE_FORMAT "K->" SIZE_FORMAT "K",
1295                     _old_used_bytes_before_gc / K, old_used_bytes_after_gc / K);
1296  log_info(gc, heap)("Humongous: " SIZE_FORMAT "K->" SIZE_FORMAT "K",
1297                     _humongous_used_bytes_before_gc / K, humongous_used_bytes_after_gc / K);
1298
1299  MetaspaceAux::print_metaspace_change(_metaspace_used_bytes_before_gc);
1300}
1301
1302void G1CollectorPolicy::print_phases(double pause_time_sec) {
1303  phase_times()->print(pause_time_sec);
1304}
1305
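// Re-tune the concurrent refinement zones after a pause so that the time spent
// updating remembered sets during the next pause stays close to the given goal,
// and adjust the dirty card queue thresholds to match the new zones.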
1306void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
1307                                                     double update_rs_processed_buffers,
1308                                                     double goal_ms) {
1309  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
1310  ConcurrentG1Refine* cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();
1311
1312  if (G1UseAdaptiveConcRefinement) {
1313    const int k_gy = 3, k_gr = 6;
1314    const double inc_k = 1.1, dec_k = 0.9;
1315
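    // Illustrative behaviour of the constants above (not part of the original
    // comments): if the last Update RS phase exceeded its goal, the green zone
    // shrinks to 90% of its previous value; if it met the goal and more than
    // green-zone buffers were processed, it grows by 10% (at least by one).
    // The yellow and red zones are then kept at 3x and 6x the green zone.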
1316    int g = cg1r->green_zone();
1317    if (update_rs_time > goal_ms) {
1318      g = (int)(g * dec_k);  // Can become 0, that's OK. That would mean mutator-only processing.
1319    } else {
1320      if (update_rs_time < goal_ms && update_rs_processed_buffers > g) {
1321        g = (int)MAX2(g * inc_k, g + 1.0);
1322      }
1323    }
1324    // Change the refinement thread parameters
1325    cg1r->set_green_zone(g);
1326    cg1r->set_yellow_zone(g * k_gy);
1327    cg1r->set_red_zone(g * k_gr);
1328    cg1r->reinitialize_threads();
1329
1330    int processing_threshold_delta = MAX2((int)(cg1r->green_zone() * _predictor.sigma()), 1);
1331    int processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
1332                                    cg1r->yellow_zone());
1333    // Change the barrier params
1334    dcqs.set_process_completed_threshold(processing_threshold);
1335    dcqs.set_max_completed_queue(cg1r->red_zone());
1336  }
1337
1338  int curr_queue_size = dcqs.completed_buffers_num();
1339  if (curr_queue_size >= cg1r->yellow_zone()) {
1340    dcqs.set_completed_queue_padding(curr_queue_size);
1341  } else {
1342    dcqs.set_completed_queue_padding(0);
1343  }
1344  dcqs.notify_if_necessary();
1345}
1346
1347size_t G1CollectorPolicy::predict_rs_length_diff() const {
1348  return (size_t) get_new_prediction(_rs_length_diff_seq);
1349}
1350
1351double G1CollectorPolicy::predict_alloc_rate_ms() const {
1352  return get_new_prediction(_alloc_rate_ms_seq);
1353}
1354
1355double G1CollectorPolicy::predict_cost_per_card_ms() const {
1356  return get_new_prediction(_cost_per_card_ms_seq);
1357}
1358
1359double G1CollectorPolicy::predict_scan_hcc_ms() const {
1360  return get_new_prediction(_cost_scan_hcc_seq);
1361}
1362
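// Predicted time to update the remembered sets from the pending dirty cards:
// (pending cards) * (predicted cost per card) plus the predicted time to scan
// the hot card cache.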
1363double G1CollectorPolicy::predict_rs_update_time_ms(size_t pending_cards) const {
1364  return pending_cards * predict_cost_per_card_ms() + predict_scan_hcc_ms();
1365}
1366
1367double G1CollectorPolicy::predict_young_cards_per_entry_ratio() const {
1368  return get_new_prediction(_young_cards_per_entry_ratio_seq);
1369}
1370
1371double G1CollectorPolicy::predict_mixed_cards_per_entry_ratio() const {
1372  if (_mixed_cards_per_entry_ratio_seq->num() < 2) {
1373    return predict_young_cards_per_entry_ratio();
1374  } else {
1375    return get_new_prediction(_mixed_cards_per_entry_ratio_seq);
1376  }
1377}
1378
1379size_t G1CollectorPolicy::predict_young_card_num(size_t rs_length) const {
1380  return (size_t) (rs_length * predict_young_cards_per_entry_ratio());
1381}
1382
1383size_t G1CollectorPolicy::predict_non_young_card_num(size_t rs_length) const {
1384  return (size_t)(rs_length * predict_mixed_cards_per_entry_ratio());
1385}
1386
1387double G1CollectorPolicy::predict_rs_scan_time_ms(size_t card_num) const {
1388  if (collector_state()->gcs_are_young()) {
1389    return card_num * get_new_prediction(_cost_per_entry_ms_seq);
1390  } else {
1391    return predict_mixed_rs_scan_time_ms(card_num);
1392  }
1393}
1394
1395double G1CollectorPolicy::predict_mixed_rs_scan_time_ms(size_t card_num) const {
1396  if (_mixed_cost_per_entry_ms_seq->num() < 3) {
1397    return card_num * get_new_prediction(_cost_per_entry_ms_seq);
1398  } else {
1399    return card_num * get_new_prediction(_mixed_cost_per_entry_ms_seq);
1400  }
1401}
1402
1403double G1CollectorPolicy::predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) const {
1404  if (_cost_per_byte_ms_during_cm_seq->num() < 3) {
1405    return (1.1 * bytes_to_copy) * get_new_prediction(_cost_per_byte_ms_seq);
1406  } else {
1407    return bytes_to_copy * get_new_prediction(_cost_per_byte_ms_during_cm_seq);
1408  }
1409}
1410
1411double G1CollectorPolicy::predict_object_copy_time_ms(size_t bytes_to_copy) const {
1412  if (collector_state()->during_concurrent_mark()) {
1413    return predict_object_copy_time_ms_during_cm(bytes_to_copy);
1414  } else {
1415    return bytes_to_copy * get_new_prediction(_cost_per_byte_ms_seq);
1416  }
1417}
1418
1419double G1CollectorPolicy::predict_constant_other_time_ms() const {
1420  return get_new_prediction(_constant_other_time_ms_seq);
1421}
1422
1423double G1CollectorPolicy::predict_young_other_time_ms(size_t young_num) const {
1424  return young_num * get_new_prediction(_young_other_cost_per_region_ms_seq);
1425}
1426
1427double G1CollectorPolicy::predict_non_young_other_time_ms(size_t non_young_num) const {
1428  return non_young_num * get_new_prediction(_non_young_other_cost_per_region_ms_seq);
1429}
1430
1431double G1CollectorPolicy::predict_remark_time_ms() const {
1432  return get_new_prediction(_concurrent_mark_remark_times_ms);
1433}
1434
1435double G1CollectorPolicy::predict_cleanup_time_ms() const {
1436  return get_new_prediction(_concurrent_mark_cleanup_times_ms);
1437}
1438
1439double G1CollectorPolicy::predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) const {
1440  TruncatedSeq* seq = surv_rate_group->get_seq(age);
1441  guarantee(seq->num() > 0, "There should be some young gen survivor samples available. Tried to access with age %d", age);
1442  double pred = get_new_prediction(seq);
1443  if (pred > 1.0) {
1444    pred = 1.0;
1445  }
1446  return pred;
1447}
1448
1449double G1CollectorPolicy::predict_yg_surv_rate(int age) const {
1450  return predict_yg_surv_rate(age, _short_lived_surv_rate_group);
1451}
1452
1453double G1CollectorPolicy::accum_yg_surv_rate_pred(int age) const {
1454  return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
1455}
1456
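// The "base" pause time is the part that does not depend on the particular
// regions chosen for the collection set: remembered set update time for the
// pending cards, remembered set scan time for the given number of cards, and
// a constant "other" overhead.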
1457double G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
1458                                                       size_t scanned_cards) const {
1459  return
1460    predict_rs_update_time_ms(pending_cards) +
1461    predict_rs_scan_time_ms(scanned_cards) +
1462    predict_constant_other_time_ms();
1463}
1464
1465double G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) const {
1466  size_t rs_length = predict_rs_length_diff();
1467  size_t card_num;
1468  if (collector_state()->gcs_are_young()) {
1469    card_num = predict_young_card_num(rs_length);
1470  } else {
1471    card_num = predict_non_young_card_num(rs_length);
1472  }
1473  return predict_base_elapsed_time_ms(pending_cards, card_num);
1474}
1475
1476size_t G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) const {
1477  size_t bytes_to_copy;
1478  if (hr->is_marked()) {
1479    bytes_to_copy = hr->max_live_bytes();
1480  } else {
1481    assert(hr->is_young() && hr->age_in_surv_rate_group() != -1, "invariant");
1482    int age = hr->age_in_surv_rate_group();
1483    double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
1484    bytes_to_copy = (size_t) (hr->used() * yg_surv_rate);
1485  }
1486  return bytes_to_copy;
1487}
1488
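// Per-region cost estimate: remembered set scan time for the region's RSet
// entries plus the copying time for the bytes predicted to survive, plus a
// per-region "other" overhead that depends on whether the region is young.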
1489double G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
1490                                                         bool for_young_gc) const {
1491  size_t rs_length = hr->rem_set()->occupied();
1492  size_t card_num;
1493
1494  // Predicting the number of cards is based on which type of GC
1495  // we're predicting for.
1496  if (for_young_gc) {
1497    card_num = predict_young_card_num(rs_length);
1498  } else {
1499    card_num = predict_non_young_card_num(rs_length);
1500  }
1501  size_t bytes_to_copy = predict_bytes_to_copy(hr);
1502
1503  double region_elapsed_time_ms =
1504    predict_rs_scan_time_ms(card_num) +
1505    predict_object_copy_time_ms(bytes_to_copy);
1506
1507  // The prediction of the "other" time for this region is based
1508  // upon the region type and NOT the GC type.
1509  if (hr->is_young()) {
1510    region_elapsed_time_ms += predict_young_other_time_ms(1);
1511  } else {
1512    region_elapsed_time_ms += predict_non_young_other_time_ms(1);
1513  }
1514  return region_elapsed_time_ms;
1515}
1516
1517void G1CollectorPolicy::init_cset_region_lengths(uint eden_cset_region_length,
1518                                                 uint survivor_cset_region_length) {
1519  _eden_cset_region_length     = eden_cset_region_length;
1520  _survivor_cset_region_length = survivor_cset_region_length;
1521  _old_cset_region_length      = 0;
1522}
1523
1524void G1CollectorPolicy::set_recorded_rs_lengths(size_t rs_lengths) {
1525  _recorded_rs_lengths = rs_lengths;
1526}
1527
1528void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
1529                                               double elapsed_ms) {
1530  _recent_gc_times_ms->add(elapsed_ms);
1531  _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec);
1532  _prev_collection_pause_end_ms = end_time_sec * 1000.0;
1533}
1534
1535void G1CollectorPolicy::clear_ratio_check_data() {
1536  _ratio_over_threshold_count = 0;
1537  _ratio_over_threshold_sum = 0.0;
1538  _pauses_since_start = 0;
1539}
1540
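// Decide how many bytes to expand the heap by after a GC, based on how much
// the recent GC time ratio exceeds the allowed overhead threshold. Returns 0
// if no expansion is warranted.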
1541size_t G1CollectorPolicy::expansion_amount() {
1542  double recent_gc_overhead = recent_avg_pause_time_ratio() * 100.0;
1543  double last_gc_overhead = _last_pause_time_ratio * 100.0;
1544  double threshold = _gc_overhead_perc;
1545  size_t expand_bytes = 0;
1546
1547  // If the heap is at less than half its maximum size, scale the threshold down
1548  // proportionally, but not below 1. Thus the smaller the heap is, the more likely
1549  // it is to expand, though the scaling code will likely keep the increase small.
1550  if (_g1->capacity() <= _g1->max_capacity() / 2) {
1551    threshold *= (double)_g1->capacity() / (double)(_g1->max_capacity() / 2);
1552    threshold = MAX2(threshold, 1.0);
1553  }
1554
1555  // If the last GC time ratio is over the threshold, increment the count of
1556  // times it has been exceeded, and add this ratio to the sum of exceeded
1557  // ratios.
1558  if (last_gc_overhead > threshold) {
1559    _ratio_over_threshold_count++;
1560    _ratio_over_threshold_sum += last_gc_overhead;
1561  }
1562
1563  // Check if we've had enough GC time ratio checks that were over the
1564  // threshold to trigger an expansion. We'll also expand if we've
1565  // reached the end of the history buffer and the average of all entries
1566  // is still over the threshold. This indicates a smaller number of GCs were
1567  // long enough to make the average exceed the threshold.
1568  bool filled_history_buffer = _pauses_since_start == NumPrevPausesForHeuristics;
1569  if ((_ratio_over_threshold_count == MinOverThresholdForGrowth) ||
1570      (filled_history_buffer && (recent_gc_overhead > threshold))) {
1571    size_t min_expand_bytes = HeapRegion::GrainBytes;
1572    size_t reserved_bytes = _g1->max_capacity();
1573    size_t committed_bytes = _g1->capacity();
1574    size_t uncommitted_bytes = reserved_bytes - committed_bytes;
1575    size_t expand_bytes_via_pct =
1576      uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
1577    double scale_factor = 1.0;
1578
1579    // If the current size is less than 1/4 of the Initial heap size, expand
1580    // by half of the delta between the current and Initial sizes, i.e. grow
1581    // back quickly.
1582    //
1583    // Otherwise, take the current size, or G1ExpandByPercentOfAvailable % of
1584    // the available expansion space, whichever is smaller, as the base
1585    // expansion size. Then possibly scale this size according to how much the
1586    // threshold has (on average) been exceeded by. If the delta is small
1587    // (less than the StartScaleDownAt value), scale the size down linearly, but
1588    // not by less than MinScaleDownFactor. If the delta is large (greater than
1589    // the StartScaleUpAt value), scale up, but adding no more than MaxScaleUpFactor
1590    // times the base size. The scaling will be linear in the range from
1591    // StartScaleUpAt to (StartScaleUpAt + ScaleUpRange). In other words,
1592    // ScaleUpRange sets the rate of scaling up.
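    // Worked example with hypothetical numbers (not from the source): if
    // _gc_overhead_perc were 10, scaling down would apply to deltas below 10,
    // scaling up would start at a delta of 15, and the 2x cap would be reached
    // once the delta is 15 + 20 = 35 or more.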
1593    if (committed_bytes < InitialHeapSize / 4) {
1594      expand_bytes = (InitialHeapSize - committed_bytes) / 2;
1595    } else {
1596      double const MinScaleDownFactor = 0.2;
1597      double const MaxScaleUpFactor = 2;
1598      double const StartScaleDownAt = _gc_overhead_perc;
1599      double const StartScaleUpAt = _gc_overhead_perc * 1.5;
1600      double const ScaleUpRange = _gc_overhead_perc * 2.0;
1601
1602      double ratio_delta;
1603      if (filled_history_buffer) {
1604        ratio_delta = recent_gc_overhead - threshold;
1605      } else {
1606        ratio_delta = (_ratio_over_threshold_sum/_ratio_over_threshold_count) - threshold;
1607      }
1608
1609      expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
1610      if (ratio_delta < StartScaleDownAt) {
1611        scale_factor = ratio_delta / StartScaleDownAt;
1612        scale_factor = MAX2(scale_factor, MinScaleDownFactor);
1613      } else if (ratio_delta > StartScaleUpAt) {
1614        scale_factor = 1 + ((ratio_delta - StartScaleUpAt) / ScaleUpRange);
1615        scale_factor = MIN2(scale_factor, MaxScaleUpFactor);
1616      }
1617    }
1618
1619    log_debug(gc, ergo, heap)("Attempt heap expansion (recent GC overhead higher than threshold after GC) "
1620                              "recent GC overhead: %1.2f %% threshold: %1.2f %% uncommitted: " SIZE_FORMAT "B base expansion amount and scale: " SIZE_FORMAT "B (%1.2f%%)",
1621                              recent_gc_overhead, threshold, uncommitted_bytes, expand_bytes, scale_factor * 100);
1622
1623    expand_bytes = static_cast<size_t>(expand_bytes * scale_factor);
1624
1625    // Ensure the expansion size is at least the minimum growth amount
1626    // and at most the remaining uncommitted byte size.
1627    expand_bytes = MAX2(expand_bytes, min_expand_bytes);
1628    expand_bytes = MIN2(expand_bytes, uncommitted_bytes);
1629
1630    clear_ratio_check_data();
1631  } else {
1632    // An expansion was not triggered. If we've started counting, increment
1633    // the number of checks we've made in the current window.  If we've
1634    // reached the end of the window without resizing, clear the counters to
1635    // start again the next time we see a ratio above the threshold.
1636    if (_ratio_over_threshold_count > 0) {
1637      _pauses_since_start++;
1638      if (_pauses_since_start > NumPrevPausesForHeuristics) {
1639        clear_ratio_check_data();
1640      }
1641    }
1642  }
1643
1644  return expand_bytes;
1645}
1646
1647void G1CollectorPolicy::print_tracing_info() const {
1648  _trace_young_gen_time_data.print();
1649  _trace_old_gen_time_data.print();
1650}
1651
1652void G1CollectorPolicy::print_yg_surv_rate_info() const {
1653#ifndef PRODUCT
1654  _short_lived_surv_rate_group->print_surv_rate_summary();
1655  // add this call for any other surv rate groups
1656#endif // PRODUCT
1657}
1658
1659bool G1CollectorPolicy::is_young_list_full() const {
1660  uint young_list_length = _g1->young_list()->length();
1661  uint young_list_target_length = _young_list_target_length;
1662  return young_list_length >= young_list_target_length;
1663}
1664
1665bool G1CollectorPolicy::can_expand_young_list() const {
1666  uint young_list_length = _g1->young_list()->length();
1667  uint young_list_max_length = _young_list_max_length;
1668  return young_list_length < young_list_max_length;
1669}
1670
1671void G1CollectorPolicy::update_max_gc_locker_expansion() {
1672  uint expansion_region_num = 0;
1673  if (GCLockerEdenExpansionPercent > 0) {
1674    double perc = (double) GCLockerEdenExpansionPercent / 100.0;
1675    double expansion_region_num_d = perc * (double) _young_list_target_length;
1676    // We use ceiling so that if expansion_region_num_d is > 0.0 (but
1677    // less than 1.0) we'll get 1.
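    // For example (illustrative numbers only): with GCLockerEdenExpansionPercent
    // at 5 and a target young length of 50 regions, ceil(2.5) = 3 extra regions
    // would be allowed.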
1678    expansion_region_num = (uint) ceil(expansion_region_num_d);
1679  } else {
1680    assert(expansion_region_num == 0, "sanity");
1681  }
1682  _young_list_max_length = _young_list_target_length + expansion_region_num;
1683  assert(_young_list_target_length <= _young_list_max_length, "post-condition");
1684}
1685
1686// Calculates survivor space parameters.
1687void G1CollectorPolicy::update_survivors_policy() {
1688  double max_survivor_regions_d =
1689                 (double) _young_list_target_length / (double) SurvivorRatio;
1690  // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
1691  // smaller than 1.0) we'll get 1.
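  // For example (illustrative numbers only): with a target young length of 60
  // regions and SurvivorRatio = 8, this allows ceil(7.5) = 8 survivor regions.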
1692  _max_survivor_regions = (uint) ceil(max_survivor_regions_d);
1693
1694  _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
1695        HeapRegion::GrainWords * _max_survivor_regions, counters());
1696}
1697
1698bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause) {
1699  // We actually check whether we are marking here and not whether we are in a
1700  // reclamation phase. This means that we will schedule a concurrent mark
1701  // even while we are still in the process of reclaiming memory.
1702  bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
1703  if (!during_cycle) {
1704    log_debug(gc, ergo)("Request concurrent cycle initiation (requested by GC cause). GC cause: %s", GCCause::to_string(gc_cause));
1705    collector_state()->set_initiate_conc_mark_if_possible(true);
1706    return true;
1707  } else {
1708    log_debug(gc, ergo)("Do not request concurrent cycle initiation (concurrent cycle already in progress). GC cause: %s", GCCause::to_string(gc_cause));
1709    return false;
1710  }
1711}
1712
1713void G1CollectorPolicy::initiate_conc_mark() {
1714  collector_state()->set_during_initial_mark_pause(true);
1715  collector_state()->set_initiate_conc_mark_if_possible(false);
1716}
1717
1718void G1CollectorPolicy::decide_on_conc_mark_initiation() {
1719  // We are about to decide on whether this pause will be an
1720  // initial-mark pause.
1721
1722  // First, collector_state()->during_initial_mark_pause() should not be already set. We
1723  // will set it here if we have to. However, it should be cleared by
1724  // the end of the pause (it's only set for the duration of an
1725  // initial-mark pause).
1726  assert(!collector_state()->during_initial_mark_pause(), "pre-condition");
1727
1728  if (collector_state()->initiate_conc_mark_if_possible()) {
1729    // We had noticed on a previous pause that the heap occupancy has
1730    // gone over the initiating threshold and we should start a
1731    // concurrent marking cycle. So we might initiate one.
1732
1733    if (!about_to_start_mixed_phase() && collector_state()->gcs_are_young()) {
1734      // Initiate a new initial mark if there is no marking or reclamation going on.
1735      initiate_conc_mark();
1736      log_debug(gc, ergo)("Initiate concurrent cycle (concurrent cycle initiation requested)");
1737    } else if (_g1->is_user_requested_concurrent_full_gc(_g1->gc_cause())) {
1738      // Initiate a user requested initial mark. An initial mark must be a young-only
1739      // GC, so the collector state must be updated to reflect this.
1740      collector_state()->set_gcs_are_young(true);
1741      collector_state()->set_last_young_gc(false);
1742
1743      abort_time_to_mixed_tracking();
1744      initiate_conc_mark();
1745      log_debug(gc, ergo)("Initiate concurrent cycle (user requested concurrent cycle)");
1746    } else {
1747      // The concurrent marking thread is still finishing up the
1748      // previous cycle. If we started one right now the two cycles
1749      // would overlap. In particular, the concurrent marking thread might
1750      // be in the process of clearing the next marking bitmap (which
1751      // we will use for the next cycle if we start one). Starting a
1752      // cycle now will be bad given that parts of the marking
1753      // information might get cleared by the marking thread. And we
1754      // cannot wait for the marking thread to finish the cycle as it
1755      // periodically yields while clearing the next marking bitmap
1756      // and, if it's in a yield point, it's waiting for us to
1757      // finish. So, at this point we will not start a cycle and we'll
1758      // let the concurrent marking thread complete the last one.
1759      log_debug(gc, ergo)("Do not initiate concurrent cycle (concurrent cycle already in progress)");
1760    }
1761  }
1762}
1763
1764class ParKnownGarbageHRClosure: public HeapRegionClosure {
1765  G1CollectedHeap* _g1h;
1766  CSetChooserParUpdater _cset_updater;
1767
1768public:
1769  ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
1770                           uint chunk_size) :
1771    _g1h(G1CollectedHeap::heap()),
1772    _cset_updater(hrSorted, true /* parallel */, chunk_size) { }
1773
1774  bool doHeapRegion(HeapRegion* r) {
1775    // Do we have any marking information for this region?
1776    if (r->is_marked()) {
1777      // We will skip any region that's currently used as an old GC
1778      // alloc region (we should not consider those for collection
1779      // before we fill them up).
1780      if (_cset_updater.should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
1781        _cset_updater.add_region(r);
1782      }
1783    }
1784    return false;
1785  }
1786};
1787
1788class ParKnownGarbageTask: public AbstractGangTask {
1789  CollectionSetChooser* _hrSorted;
1790  uint _chunk_size;
1791  G1CollectedHeap* _g1;
1792  HeapRegionClaimer _hrclaimer;
1793
1794public:
1795  ParKnownGarbageTask(CollectionSetChooser* hrSorted, uint chunk_size, uint n_workers) :
1796      AbstractGangTask("ParKnownGarbageTask"),
1797      _hrSorted(hrSorted), _chunk_size(chunk_size),
1798      _g1(G1CollectedHeap::heap()), _hrclaimer(n_workers) {}
1799
1800  void work(uint worker_id) {
1801    ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size);
1802    _g1->heap_region_par_iterate(&parKnownGarbageCl, worker_id, &_hrclaimer);
1803  }
1804};
1805
1806uint G1CollectorPolicy::calculate_parallel_work_chunk_size(uint n_workers, uint n_regions) const {
1807  assert(n_workers > 0, "Active gc workers should be greater than 0");
1808  const uint overpartition_factor = 4;
1809  const uint min_chunk_size = MAX2(n_regions / n_workers, 1U);
1810  return MAX2(n_regions / (n_workers * overpartition_factor), min_chunk_size);
1811}
1812
1813void G1CollectorPolicy::record_concurrent_mark_cleanup_end() {
1814  cset_chooser()->clear();
1815
1816  WorkGang* workers = _g1->workers();
1817  uint n_workers = workers->active_workers();
1818
1819  uint n_regions = _g1->num_regions();
1820  uint chunk_size = calculate_parallel_work_chunk_size(n_workers, n_regions);
1821  cset_chooser()->prepare_for_par_region_addition(n_workers, n_regions, chunk_size);
1822  ParKnownGarbageTask par_known_garbage_task(cset_chooser(), chunk_size, n_workers);
1823  workers->run_task(&par_known_garbage_task);
1824
1825  cset_chooser()->sort_regions();
1826
1827  double end_sec = os::elapsedTime();
1828  double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
1829  _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
1830  _prev_collection_pause_end_ms += elapsed_time_ms;
1831
1832  record_pause(Cleanup, _mark_cleanup_start_sec, end_sec);
1833}
1834
1835// Add the heap region at the head of the non-incremental collection set
1836void G1CollectorPolicy::add_old_region_to_cset(HeapRegion* hr) {
1837  assert(_inc_cset_build_state == Active, "Precondition");
1838  assert(hr->is_old(), "the region should be old");
1839
1840  assert(!hr->in_collection_set(), "should not already be in the CSet");
1841  _g1->register_old_region_with_cset(hr);
1842  hr->set_next_in_collection_set(_collection_set);
1843  _collection_set = hr;
1844  _collection_set_bytes_used_before += hr->used();
1845  size_t rs_length = hr->rem_set()->occupied();
1846  _recorded_rs_lengths += rs_length;
1847  _old_cset_region_length += 1;
1848}
1849
1850// Initialize the per-collection-set information
1851void G1CollectorPolicy::start_incremental_cset_building() {
1852  assert(_inc_cset_build_state == Inactive, "Precondition");
1853
1854  _inc_cset_head = NULL;
1855  _inc_cset_tail = NULL;
1856  _inc_cset_bytes_used_before = 0;
1857
1858  _inc_cset_max_finger = 0;
1859  _inc_cset_recorded_rs_lengths = 0;
1860  _inc_cset_recorded_rs_lengths_diffs = 0;
1861  _inc_cset_predicted_elapsed_time_ms = 0.0;
1862  _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
1863  _inc_cset_build_state = Active;
1864}
1865
1866void G1CollectorPolicy::finalize_incremental_cset_building() {
1867  assert(_inc_cset_build_state == Active, "Precondition");
1868  assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
1869
1870  // The two "main" fields, _inc_cset_recorded_rs_lengths and
1871  // _inc_cset_predicted_elapsed_time_ms, are updated by the thread
1872  // that adds a new region to the CSet. Further updates by the
1873  // concurrent refinement thread that samples the young RSet lengths
1874  // are accumulated in the *_diffs fields. Here we add the diffs to
1875  // the "main" fields.
1876
1877  if (_inc_cset_recorded_rs_lengths_diffs >= 0) {
1878    _inc_cset_recorded_rs_lengths += _inc_cset_recorded_rs_lengths_diffs;
1879  } else {
1880    // This is defensive. The diff should in theory always be positive,
1881    // as RSets can only grow between GCs. However, given that we
1882    // sample their size concurrently with other threads updating them
1883    // it's possible that we might get the wrong size back, which
1884    // could make the calculations somewhat inaccurate.
1885    size_t diffs = (size_t) (-_inc_cset_recorded_rs_lengths_diffs);
1886    if (_inc_cset_recorded_rs_lengths >= diffs) {
1887      _inc_cset_recorded_rs_lengths -= diffs;
1888    } else {
1889      _inc_cset_recorded_rs_lengths = 0;
1890    }
1891  }
1892  _inc_cset_predicted_elapsed_time_ms +=
1893                                     _inc_cset_predicted_elapsed_time_ms_diffs;
1894
1895  _inc_cset_recorded_rs_lengths_diffs = 0;
1896  _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
1897}
1898
1899void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) {
1900  // This routine is used when:
1901  // * adding survivor regions to the incremental cset at the end of an
1902  //   evacuation pause,
1903  // * adding the current allocation region to the incremental cset
1904  //   when it is retired, and
1905  // * updating existing policy information for a region in the
1906  //   incremental cset via young list RSet sampling.
1907  // Therefore this routine may be called at a safepoint by the
1908  // VM thread, or in-between safepoints by mutator threads (when
1909  // retiring the current allocation region) or a concurrent
1910  // refine thread (RSet sampling).
1911
1912  double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young());
1913  size_t used_bytes = hr->used();
1914  _inc_cset_recorded_rs_lengths += rs_length;
1915  _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
1916  _inc_cset_bytes_used_before += used_bytes;
1917
1918  // Cache the values we have added to the aggregated information
1919  // in the heap region in case we have to remove this region from
1920  // the incremental collection set, or it is updated by the
1921  // rset sampling code
1922  hr->set_recorded_rs_length(rs_length);
1923  hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
1924}
1925
1926void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr,
1927                                                     size_t new_rs_length) {
1928  // Update the CSet information that is dependent on the new RS length
1929  assert(hr->is_young(), "Precondition");
1930  assert(!SafepointSynchronize::is_at_safepoint(),
1931                                               "should not be at a safepoint");
1932
1933  // We could have updated _inc_cset_recorded_rs_lengths and
1934  // _inc_cset_predicted_elapsed_time_ms directly but we'd need to do
1935  // that atomically, as this code is executed by a concurrent
1936  // refinement thread, potentially concurrently with a mutator thread
1937  // allocating a new region and also updating the same fields. To
1938  // avoid the atomic operations we accumulate these updates on two
1939  // separate fields (*_diffs) and we'll just add them to the "main"
1940  // fields at the start of a GC.
1941
1942  ssize_t old_rs_length = (ssize_t) hr->recorded_rs_length();
1943  ssize_t rs_lengths_diff = (ssize_t) new_rs_length - old_rs_length;
1944  _inc_cset_recorded_rs_lengths_diffs += rs_lengths_diff;
1945
1946  double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
1947  double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young());
1948  double elapsed_ms_diff = new_region_elapsed_time_ms - old_elapsed_time_ms;
1949  _inc_cset_predicted_elapsed_time_ms_diffs += elapsed_ms_diff;
1950
1951  hr->set_recorded_rs_length(new_rs_length);
1952  hr->set_predicted_elapsed_time_ms(new_region_elapsed_time_ms);
1953}
1954
1955void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
1956  assert(hr->is_young(), "invariant");
1957  assert(hr->young_index_in_cset() > -1, "should have already been set");
1958  assert(_inc_cset_build_state == Active, "Precondition");
1959
1960  // We need to clear and set the cached recorded collection set
1961  // information in the heap region here (before the region gets added
1962  // to the collection set). An individual heap region's cached values
1963  // are calculated, aggregated with the policy collection set info,
1964  // and cached in the heap region here (initially) and (subsequently)
1965  // by the Young List sampling code.
1966
1967  size_t rs_length = hr->rem_set()->occupied();
1968  add_to_incremental_cset_info(hr, rs_length);
1969
1970  HeapWord* hr_end = hr->end();
1971  _inc_cset_max_finger = MAX2(_inc_cset_max_finger, hr_end);
1972
1973  assert(!hr->in_collection_set(), "invariant");
1974  _g1->register_young_region_with_cset(hr);
1975  assert(hr->next_in_collection_set() == NULL, "invariant");
1976}
1977
1978// Add the region at the RHS of the incremental cset
1979void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) {
1980  // We should only ever be appending survivors at the end of a pause
1981  assert(hr->is_survivor(), "Logic");
1982
1983  // Do the 'common' stuff
1984  add_region_to_incremental_cset_common(hr);
1985
1986  // Now add the region at the right hand side
1987  if (_inc_cset_tail == NULL) {
1988    assert(_inc_cset_head == NULL, "invariant");
1989    _inc_cset_head = hr;
1990  } else {
1991    _inc_cset_tail->set_next_in_collection_set(hr);
1992  }
1993  _inc_cset_tail = hr;
1994}
1995
1996// Add the region to the LHS of the incremental cset
1997void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) {
1998  // Survivors should be added to the RHS at the end of a pause
1999  assert(hr->is_eden(), "Logic");
2000
2001  // Do the 'common' stuff
2002  add_region_to_incremental_cset_common(hr);
2003
2004  // Add the region at the left hand side
2005  hr->set_next_in_collection_set(_inc_cset_head);
2006  if (_inc_cset_head == NULL) {
2007    assert(_inc_cset_tail == NULL, "Invariant");
2008    _inc_cset_tail = hr;
2009  }
2010  _inc_cset_head = hr;
2011}
2012
2013#ifndef PRODUCT
2014void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream* st) {
2015  assert(list_head == inc_cset_head() || list_head == collection_set(), "must be");
2016
2017  st->print_cr("\nCollection_set:");
2018  HeapRegion* csr = list_head;
2019  while (csr != NULL) {
2020    HeapRegion* next = csr->next_in_collection_set();
2021    assert(csr->in_collection_set(), "bad CS");
2022    st->print_cr("  " HR_FORMAT ", P: " PTR_FORMAT ", N: " PTR_FORMAT ", age: %4d",
2023                 HR_FORMAT_PARAMS(csr),
2024                 p2i(csr->prev_top_at_mark_start()), p2i(csr->next_top_at_mark_start()),
2025                 csr->age_in_surv_rate_group_cond());
2026    csr = next;
2027  }
2028}
2029#endif // !PRODUCT
2030
2031double G1CollectorPolicy::reclaimable_bytes_perc(size_t reclaimable_bytes) const {
2032  // Returns the given amount of reclaimable bytes (that represents
2033  // the amount of reclaimable space still to be collected) as a
2034  // percentage of the current heap capacity.
2035  size_t capacity_bytes = _g1->capacity();
2036  return (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
2037}
2038
2039void G1CollectorPolicy::maybe_start_marking() {
2040  if (need_to_start_conc_mark("end of GC")) {
2041    // Note: this might have already been set, if during the last
2042    // pause we decided to start a cycle but at the beginning of
2043    // this pause we decided to postpone it. That's OK.
2044    collector_state()->set_initiate_conc_mark_if_possible(true);
2045  }
2046}
2047
2048G1CollectorPolicy::PauseKind G1CollectorPolicy::young_gc_pause_kind() const {
2049  assert(!collector_state()->full_collection(), "must be");
2050  if (collector_state()->during_initial_mark_pause()) {
2051    assert(collector_state()->last_gc_was_young(), "must be");
2052    assert(!collector_state()->last_young_gc(), "must be");
2053    return InitialMarkGC;
2054  } else if (collector_state()->last_young_gc()) {
2055    assert(!collector_state()->during_initial_mark_pause(), "must be");
2056    assert(collector_state()->last_gc_was_young(), "must be");
2057    return LastYoungGC;
2058  } else if (!collector_state()->last_gc_was_young()) {
2059    assert(!collector_state()->during_initial_mark_pause(), "must be");
2060    assert(!collector_state()->last_young_gc(), "must be");
2061    return MixedGC;
2062  } else {
2063    assert(collector_state()->last_gc_was_young(), "must be");
2064    assert(!collector_state()->during_initial_mark_pause(), "must be");
2065    assert(!collector_state()->last_young_gc(), "must be");
2066    return YoungOnlyGC;
2067  }
2068}
2069
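// Record the pause with the MMU tracker (Full GCs excluded) and keep the
// initial-mark-to-first-mixed tracking up to date: pauses between the end of
// an initial mark and the first mixed GC are accumulated, a Full GC aborts
// the measurement, and a mixed GC completes it.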
2070void G1CollectorPolicy::record_pause(PauseKind kind, double start, double end) {
2071  // Manage the MMU tracker. For some reason it ignores Full GCs.
2072  if (kind != FullGC) {
2073    _mmu_tracker->add_pause(start, end);
2074  }
2075  // Manage the mutator time tracking from initial mark to first mixed gc.
2076  switch (kind) {
2077    case FullGC:
2078      abort_time_to_mixed_tracking();
2079      break;
2080    case Cleanup:
2081    case Remark:
2082    case YoungOnlyGC:
2083    case LastYoungGC:
2084      _initial_mark_to_mixed.add_pause(end - start);
2085      break;
2086    case InitialMarkGC:
2087      _initial_mark_to_mixed.record_initial_mark_end(end);
2088      break;
2089    case MixedGC:
2090      _initial_mark_to_mixed.record_mixed_gc_start(start);
2091      break;
2092    default:
2093      ShouldNotReachHere();
2094  }
2095}
2096
2097void G1CollectorPolicy::abort_time_to_mixed_tracking() {
2098  _initial_mark_to_mixed.reset();
2099}
2100
2101bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
2102                                                const char* false_action_str) const {
2103  if (cset_chooser()->is_empty()) {
2104    log_debug(gc, ergo)("%s (candidate old regions not available)", false_action_str);
2105    return false;
2106  }
2107
2108  // Is the amount of uncollected reclaimable space above G1HeapWastePercent?
2109  size_t reclaimable_bytes = cset_chooser()->remaining_reclaimable_bytes();
2110  double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
2111  double threshold = (double) G1HeapWastePercent;
2112  if (reclaimable_perc <= threshold) {
2113    log_debug(gc, ergo)("%s (reclaimable percentage not over threshold). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
2114                        false_action_str, cset_chooser()->remaining_regions(), reclaimable_bytes, reclaimable_perc, G1HeapWastePercent);
2115    return false;
2116  }
2117  log_debug(gc, ergo)("%s (candidate old regions available). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
2118                      true_action_str, cset_chooser()->remaining_regions(), reclaimable_bytes, reclaimable_perc, G1HeapWastePercent);
2119  return true;
2120}
2121
2122uint G1CollectorPolicy::calc_min_old_cset_length() const {
2123  // The min old CSet region bound is based on the maximum desired
2124  // number of mixed GCs after a cycle. I.e., even if some old regions
2125  // look expensive, we should add them to the CSet anyway to make
2126  // sure we go through the available old regions in no more than the
2127  // maximum desired number of mixed GCs.
2128  //
2129  // The calculation is based on the number of marked regions we added
2130  // to the CSet chooser in the first place, not how many remain, so
2131  // that the result is the same during all mixed GCs that follow a cycle.
2132
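  // Illustrative example (hypothetical numbers): with 100 regions added to the
  // CSet chooser and G1MixedGCCountTarget = 8, the minimum is ceil(100 / 8) = 13
  // old regions per mixed GC.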
2133  const size_t region_num = (size_t) cset_chooser()->length();
2134  const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1);
2135  size_t result = region_num / gc_num;
2136  // emulate ceiling
2137  if (result * gc_num < region_num) {
2138    result += 1;
2139  }
2140  return (uint) result;
2141}
2142
2143uint G1CollectorPolicy::calc_max_old_cset_length() const {
2144  // The max old CSet region bound is based on the threshold expressed
2145  // as a percentage of the heap size. I.e., it should bound the
2146  // number of old regions added to the CSet irrespective of how many
2147  // of them are available.
2148
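  // Illustrative example (hypothetical numbers): with 2048 heap regions and
  // G1OldCSetRegionThresholdPercent = 10, at most ceil(2048 * 10 / 100) = 205
  // old regions are added to any single collection set.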
2149  const G1CollectedHeap* g1h = G1CollectedHeap::heap();
2150  const size_t region_num = g1h->num_regions();
2151  const size_t perc = (size_t) G1OldCSetRegionThresholdPercent;
2152  size_t result = region_num * perc / 100;
2153  // emulate ceiling
2154  if (100 * result < region_num * perc) {
2155    result += 1;
2156  }
2157  return (uint) result;
2158}
2159
2160
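// Build the young part of the collection set: all eden and survivor regions
// (the incremental collection set built up since the last pause) are taken,
// and their predicted evacuation time is subtracted from the pause time
// budget. The remaining time budget is returned for the old-region selection.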
2161double G1CollectorPolicy::finalize_young_cset_part(double target_pause_time_ms) {
2162  double young_start_time_sec = os::elapsedTime();
2163
2164  YoungList* young_list = _g1->young_list();
2165  finalize_incremental_cset_building();
2166
2167  guarantee(target_pause_time_ms > 0.0,
2168            "target_pause_time_ms = %1.6lf should be positive", target_pause_time_ms);
2169  guarantee(_collection_set == NULL, "Precondition");
2170
2171  double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
2172  double time_remaining_ms = MAX2(target_pause_time_ms - base_time_ms, 0.0);
2173
2174  log_trace(gc, ergo, cset)("Start choosing CSet. pending cards: " SIZE_FORMAT " predicted base time: %1.2fms remaining time: %1.2fms target pause time: %1.2fms",
2175                            _pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms);
2176
2177  collector_state()->set_last_gc_was_young(collector_state()->gcs_are_young());
2178
2179  if (collector_state()->last_gc_was_young()) {
2180    _trace_young_gen_time_data.increment_young_collection_count();
2181  } else {
2182    _trace_young_gen_time_data.increment_mixed_collection_count();
2183  }
2184
2185  // The young list is laid out so that the survivor regions from the previous
2186  // pause are appended to the RHS of the young list, i.e.
2187  //   [Newly Young Regions ++ Survivors from last pause].
2188
2189  uint survivor_region_length = young_list->survivor_length();
2190  uint eden_region_length = young_list->eden_length();
2191  init_cset_region_lengths(eden_region_length, survivor_region_length);
2192
2193  HeapRegion* hr = young_list->first_survivor_region();
2194  while (hr != NULL) {
2195    assert(hr->is_survivor(), "badly formed young list");
2196    // There is a convention that all the young regions in the CSet
2197    // are tagged as "eden", so we do this for the survivors here. We
2198    // use the special set_eden_pre_gc() as it doesn't check that the
2199    // region is free (which is not the case here).
2200    hr->set_eden_pre_gc();
2201    hr = hr->get_next_young_region();
2202  }
2203
2204  // Clear the fields that point to the survivor list - they are all young now.
2205  young_list->clear_survivors();
2206
2207  _collection_set = _inc_cset_head;
2208  _collection_set_bytes_used_before = _inc_cset_bytes_used_before;
2209  time_remaining_ms = MAX2(time_remaining_ms - _inc_cset_predicted_elapsed_time_ms, 0.0);
2210
2211  log_trace(gc, ergo, cset)("Add young regions to CSet. eden: %u regions, survivors: %u regions, predicted young region time: %1.2fms, target pause time: %1.2fms",
2212                            eden_region_length, survivor_region_length, _inc_cset_predicted_elapsed_time_ms, target_pause_time_ms);
2213
2214  // The number of recorded young regions is the incremental
2215  // collection set's current size
2216  set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);
2217
2218  double young_end_time_sec = os::elapsedTime();
2219  phase_times()->record_young_cset_choice_time_ms((young_end_time_sec - young_start_time_sec) * 1000.0);
2220
2221  return time_remaining_ms;
2222}
2223
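// Select old regions for a mixed collection: candidate regions are taken in
// the order maintained by the CSet chooser until either the maximum old region
// count is reached, the remaining reclaimable space drops to G1HeapWastePercent
// or below, or (once the minimum old region count has been added) the predicted
// time no longer fits in the remaining pause time budget.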
2224void G1CollectorPolicy::finalize_old_cset_part(double time_remaining_ms) {
2225  double non_young_start_time_sec = os::elapsedTime();
2226  double predicted_old_time_ms = 0.0;
2227
2228
2229  if (!collector_state()->gcs_are_young()) {
2230    cset_chooser()->verify();
2231    const uint min_old_cset_length = calc_min_old_cset_length();
2232    const uint max_old_cset_length = calc_max_old_cset_length();
2233
2234    uint expensive_region_num = 0;
2235    bool check_time_remaining = adaptive_young_list_length();
2236
2237    HeapRegion* hr = cset_chooser()->peek();
2238    while (hr != NULL) {
2239      if (old_cset_region_length() >= max_old_cset_length) {
2240        // Added maximum number of old regions to the CSet.
2241        log_debug(gc, ergo, cset)("Finish adding old regions to CSet (old CSet region num reached max). old %u regions, max %u regions",
2242                                  old_cset_region_length(), max_old_cset_length);
2243        break;
2244      }
2245
2246
2247      // Stop adding regions if the remaining reclaimable space is
2248      // not above G1HeapWastePercent.
2249      size_t reclaimable_bytes = cset_chooser()->remaining_reclaimable_bytes();
2250      double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
2251      double threshold = (double) G1HeapWastePercent;
2252      if (reclaimable_perc <= threshold) {
2253        // We've added enough old regions that the amount of uncollected
2254        // reclaimable space is at or below the waste threshold. Stop
2255        // adding old regions to the CSet.
2256        log_debug(gc, ergo, cset)("Finish adding old regions to CSet (reclaimable percentage not over threshold). "
2257                                  "old %u regions, max %u regions, reclaimable: " SIZE_FORMAT "B (%1.2f%%) threshold: " UINTX_FORMAT "%%",
2258                                  old_cset_region_length(), max_old_cset_length, reclaimable_bytes, reclaimable_perc, G1HeapWastePercent);
2259        break;
2260      }
2261
2262      double predicted_time_ms = predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young());
2263      if (check_time_remaining) {
2264        if (predicted_time_ms > time_remaining_ms) {
2265          // Too expensive for the current CSet.
2266
2267          if (old_cset_region_length() >= min_old_cset_length) {
2268            // We have added the minimum number of old regions to the CSet,
2269            // we are done with this CSet.
2270            log_debug(gc, ergo, cset)("Finish adding old regions to CSet (predicted time is too high). "
2271                                      "predicted time: %1.2fms, remaining time: %1.2fms old %u regions, min %u regions",
2272                                      predicted_time_ms, time_remaining_ms, old_cset_region_length(), min_old_cset_length);
2273            break;
2274          }
2275
2276          // We'll add it anyway given that we haven't reached the
2277          // minimum number of old regions.
2278          expensive_region_num += 1;
2279        }
2280      } else {
2281        if (old_cset_region_length() >= min_old_cset_length) {
2282          // In the non-auto-tuning case, we'll finish adding regions
2283          // to the CSet if we reach the minimum.
2284
2285          log_debug(gc, ergo, cset)("Finish adding old regions to CSet (old CSet region num reached min). old %u regions, min %u regions",
2286                                    old_cset_region_length(), min_old_cset_length);
2287          break;
2288        }
2289      }
2290
2291      // We will add this region to the CSet.
2292      time_remaining_ms = MAX2(time_remaining_ms - predicted_time_ms, 0.0);
2293      predicted_old_time_ms += predicted_time_ms;
2294      cset_chooser()->pop(); // already have region via peek()
2295      _g1->old_set_remove(hr);
2296      add_old_region_to_cset(hr);
2297
2298      hr = cset_chooser()->peek();
2299    }
2300    if (hr == NULL) {
2301      log_debug(gc, ergo, cset)("Finish adding old regions to CSet (candidate old regions not available)");
2302    }
2303
2304    if (expensive_region_num > 0) {
2305      // We print the information once here at the end, predicated on
2306      // whether we added any apparently expensive regions or not, to
2307      // avoid generating output per region.
2308      log_debug(gc, ergo, cset)("Added expensive regions to CSet (old CSet region num not reached min). "
2309                                "old %u regions, expensive: %u regions, min %u regions, remaining time: %1.2fms",
2310                                old_cset_region_length(), expensive_region_num, min_old_cset_length, time_remaining_ms);
2311    }
2312
2313    cset_chooser()->verify();
2314  }
2315
2316  stop_incremental_cset_building();
2317
2318  log_debug(gc, ergo, cset)("Finish choosing CSet. old %u regions, predicted old region time: %1.2fms, time remaining: %1.2f",
2319                            old_cset_region_length(), predicted_old_time_ms, time_remaining_ms);
2320
2321  double non_young_end_time_sec = os::elapsedTime();
2322  phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);
2323}
2324
2325void TraceYoungGenTimeData::record_start_collection(double time_to_stop_the_world_ms) {
2326  if (TraceYoungGenTime) {
2327    _all_stop_world_times_ms.add(time_to_stop_the_world_ms);
2328  }
2329}
2330
2331void TraceYoungGenTimeData::record_yield_time(double yield_time_ms) {
2332  if (TraceYoungGenTime) {
2333    _all_yield_times_ms.add(yield_time_ms);
2334  }
2335}
2336
2337void TraceYoungGenTimeData::record_end_collection(double pause_time_ms, G1GCPhaseTimes* phase_times) {
2338  if (TraceYoungGenTime) {
2339    _total.add(pause_time_ms);
2340    _other.add(pause_time_ms - phase_times->accounted_time_ms());
2341    _root_region_scan_wait.add(phase_times->root_region_scan_wait_time_ms());
2342    _parallel.add(phase_times->cur_collection_par_time_ms());
2343    _ext_root_scan.add(phase_times->average_time_ms(G1GCPhaseTimes::ExtRootScan));
2344    _satb_filtering.add(phase_times->average_time_ms(G1GCPhaseTimes::SATBFiltering));
2345    _update_rs.add(phase_times->average_time_ms(G1GCPhaseTimes::UpdateRS));
2346    _scan_rs.add(phase_times->average_time_ms(G1GCPhaseTimes::ScanRS));
2347    _obj_copy.add(phase_times->average_time_ms(G1GCPhaseTimes::ObjCopy));
2348    _termination.add(phase_times->average_time_ms(G1GCPhaseTimes::Termination));
2349
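    // Whatever part of the parallel time is not covered by the individual
    // phase averages above is recorded as "parallel other" time below.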
2350    double parallel_known_time = phase_times->average_time_ms(G1GCPhaseTimes::ExtRootScan) +
2351      phase_times->average_time_ms(G1GCPhaseTimes::SATBFiltering) +
2352      phase_times->average_time_ms(G1GCPhaseTimes::UpdateRS) +
2353      phase_times->average_time_ms(G1GCPhaseTimes::ScanRS) +
2354      phase_times->average_time_ms(G1GCPhaseTimes::ObjCopy) +
2355      phase_times->average_time_ms(G1GCPhaseTimes::Termination);
2356
2357    double parallel_other_time = phase_times->cur_collection_par_time_ms() - parallel_known_time;
2358    _parallel_other.add(parallel_other_time);
2359    _clear_ct.add(phase_times->cur_clear_ct_time_ms());
2360  }
2361}
2362
2363void TraceYoungGenTimeData::increment_young_collection_count() {
2364  if (TraceYoungGenTime) {
2365    ++_young_pause_num;
2366  }
2367}
2368
2369void TraceYoungGenTimeData::increment_mixed_collection_count() {
2370  if (TraceYoungGenTime) {
2371    ++_mixed_pause_num;
2372  }
2373}
2374
2375void TraceYoungGenTimeData::print_summary(const char* str,
2376                                          const NumberSeq* seq) const {
2377  double sum = seq->sum();
2378  tty->print_cr("%-27s = %8.2lf s (avg = %8.2lf ms)",
2379                str, sum / 1000.0, seq->avg());
2380}
2381
2382void TraceYoungGenTimeData::print_summary_sd(const char* str,
2383                                             const NumberSeq* seq) const {
2384  print_summary(str, seq);
2385  tty->print_cr("%45s = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
2386                "(num", seq->num(), seq->sd(), seq->maximum());
2387}
2388
2389void TraceYoungGenTimeData::print() const {
2390  if (!TraceYoungGenTime) {
2391    return;
2392  }
2393
2394  tty->print_cr("ALL PAUSES");
2395  print_summary_sd("   Total", &_total);
2396  tty->cr();
2397  tty->cr();
2398  tty->print_cr("   Young GC Pauses: %8d", _young_pause_num);
2399  tty->print_cr("   Mixed GC Pauses: %8d", _mixed_pause_num);
2400  tty->cr();
2401
2402  tty->print_cr("EVACUATION PAUSES");
2403
2404  if (_young_pause_num == 0 && _mixed_pause_num == 0) {
2405    tty->print_cr("none");
2406  } else {
2407    print_summary_sd("   Evacuation Pauses", &_total);
2408    print_summary("      Root Region Scan Wait", &_root_region_scan_wait);
2409    print_summary("      Parallel Time", &_parallel);
2410    print_summary("         Ext Root Scanning", &_ext_root_scan);
2411    print_summary("         SATB Filtering", &_satb_filtering);
2412    print_summary("         Update RS", &_update_rs);
2413    print_summary("         Scan RS", &_scan_rs);
2414    print_summary("         Object Copy", &_obj_copy);
2415    print_summary("         Termination", &_termination);
2416    print_summary("         Parallel Other", &_parallel_other);
2417    print_summary("      Clear CT", &_clear_ct);
2418    print_summary("      Other", &_other);
2419  }
2420  tty->cr();
2421
2422  tty->print_cr("MISC");
2423  print_summary_sd("   Stop World", &_all_stop_world_times_ms);
2424  print_summary_sd("   Yields", &_all_yield_times_ms);
2425}
2426
2427void TraceOldGenTimeData::record_full_collection(double full_gc_time_ms) {
2428  if (TraceOldGenTime) {
2429    _all_full_gc_times.add(full_gc_time_ms);
2430  }
2431}
2432
2433void TraceOldGenTimeData::print() const {
2434  if (!TraceOldGenTime) {
2435    return;
2436  }
2437
2438  if (_all_full_gc_times.num() > 0) {
2439    tty->print("\n%4d full_gcs: total time = %8.2f s",
2440      _all_full_gc_times.num(),
2441      _all_full_gc_times.sum() / 1000.0);
2442    tty->print_cr(" (avg = %8.2fms).", _all_full_gc_times.avg());
2443    tty->print_cr("                     [std. dev = %8.2f ms, max = %8.2f ms]",
2444      _all_full_gc_times.sd(),
2445      _all_full_gc_times.maximum());
2446  }
2447}
2448