g1CollectorPolicy.hpp revision 9727:f944761a3ce3
/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_G1_G1COLLECTORPOLICY_HPP
#define SHARE_VM_GC_G1_G1COLLECTORPOLICY_HPP

#include "gc/g1/collectionSetChooser.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1InCSetState.hpp"
#include "gc/g1/g1InitialMarkToMixedTimeTracker.hpp"
#include "gc/g1/g1MMUTracker.hpp"
#include "gc/g1/g1Predictions.hpp"
#include "gc/shared/collectorPolicy.hpp"
#include "utilities/pair.hpp"

// A G1CollectorPolicy makes policy decisions that determine the
// characteristics of the collector.  Examples include:
//   * choice of collection set.
//   * when to collect.

class HeapRegion;
class CollectionSetChooser;
class G1IHOPControl;

// TraceYoungGenTime collects data on _both_ young and mixed evacuation pauses
// (the latter may also contain non-young regions, i.e. regions that are
// technically in the old generation), while TraceOldGenTime collects data
// about full GCs.
class TraceYoungGenTimeData : public CHeapObj<mtGC> {
 private:
  unsigned  _young_pause_num;
  unsigned  _mixed_pause_num;

  NumberSeq _all_stop_world_times_ms;
  NumberSeq _all_yield_times_ms;

  NumberSeq _total;
  NumberSeq _other;
  NumberSeq _root_region_scan_wait;
  NumberSeq _parallel;
  NumberSeq _ext_root_scan;
  NumberSeq _satb_filtering;
  NumberSeq _update_rs;
  NumberSeq _scan_rs;
  NumberSeq _obj_copy;
  NumberSeq _termination;
  NumberSeq _parallel_other;
  NumberSeq _clear_ct;

  void print_summary(const char* str, const NumberSeq* seq) const;
  void print_summary_sd(const char* str, const NumberSeq* seq) const;

 public:
  TraceYoungGenTimeData() : _young_pause_num(0), _mixed_pause_num(0) {}
  void record_start_collection(double time_to_stop_the_world_ms);
  void record_yield_time(double yield_time_ms);
  void record_end_collection(double pause_time_ms, G1GCPhaseTimes* phase_times);
  void increment_young_collection_count();
  void increment_mixed_collection_count();
  void print() const;
};

class TraceOldGenTimeData : public CHeapObj<mtGC> {
 private:
  NumberSeq _all_full_gc_times;

 public:
  void record_full_collection(double full_gc_time_ms);
  void print() const;
};
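
// A minimal usage sketch (hypothetical driver code, not part of this file;
// stop_ms, pause_ms and phase_times stand in for values the policy tracks):
//
//   TraceYoungGenTimeData* young_data = new TraceYoungGenTimeData();
//   young_data->record_start_collection(stop_ms);   // time to reach safepoint
//   // ... evacuation pause runs ...
//   young_data->record_end_collection(pause_ms, phase_times);
//   young_data->increment_young_collection_count();
//   young_data->print();   // summarizes the recorded NumberSeq data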

// There are three command line options related to the young gen size:
// NewSize, MaxNewSize and NewRatio (there is also -Xmn, which is just
// a shorthand for NewSize==MaxNewSize). G1 will use its internal
// heuristics to calculate the actual young gen size, so these options
// basically only limit the range within which G1 can pick a young gen
// size. Also, these are general options taking byte sizes. G1 will
// internally work with a number of regions instead. So, some rounding
// will occur.
//
// If nothing related to the young gen size is set on the command
// line we should allow the young gen to be between G1NewSizePercent
// and G1MaxNewSizePercent of the heap size. This means that every time
// the heap size changes, the limits for the young gen size will be
// recalculated.
//
// If only -XX:NewSize is set we should use the specified value as the
// minimum size for young gen. Still using G1MaxNewSizePercent of the
// heap as maximum.
//
// If only -XX:MaxNewSize is set we should use the specified value as the
// maximum size for young gen. Still using G1NewSizePercent of the heap
// as minimum.
//
// If -XX:NewSize and -XX:MaxNewSize are both specified we use these values.
// No updates when the heap size changes. There is a special case when
// NewSize==MaxNewSize. This is interpreted as "fixed" and will use a
// different heuristic for calculating the collection set when we do mixed
// collection.
//
// If only -XX:NewRatio is set we should use the specified ratio of the heap
// as both min and max. This will be interpreted as "fixed" just like the
// NewSize==MaxNewSize case above. But we will update the min and max
// every time the heap size changes.
//
// NewSize and MaxNewSize override NewRatio. So, NewRatio is ignored if it is
// combined with either NewSize or MaxNewSize. (A warning message is printed.)
// A sketch illustrating these combinations follows the class below.
class G1YoungGenSizer : public CHeapObj<mtGC> {
private:
  enum SizerKind {
    SizerDefaults,
    SizerNewSizeOnly,
    SizerMaxNewSizeOnly,
    SizerMaxAndNewSize,
    SizerNewRatio
  };
  SizerKind _sizer_kind;
  uint _min_desired_young_length;
  uint _max_desired_young_length;
  bool _adaptive_size;
  uint calculate_default_min_length(uint new_number_of_heap_regions);
  uint calculate_default_max_length(uint new_number_of_heap_regions);

  // Update the given values for minimum and maximum young gen length in regions
  // given the number of heap regions, depending on the kind of sizing algorithm.
  void recalculate_min_max_young_length(uint number_of_heap_regions, uint* min_young_length, uint* max_young_length);

public:
  G1YoungGenSizer();
  // Calculate the maximum length of the young gen given the number of regions
  // depending on the sizing algorithm.
  uint max_young_length(uint number_of_heap_regions);

  void heap_size_changed(uint new_number_of_heap_regions);
  uint min_desired_young_length() {
    return _min_desired_young_length;
  }
  uint max_desired_young_length() {
    return _max_desired_young_length;
  }

  bool adaptive_young_list_length() const {
    return _adaptive_size;
  }
};
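
// For illustration only, a sketch of how the option combinations described
// above could translate into bounds (percent_of() is a hypothetical helper
// rounding a percentage of the region count; not the actual implementation):
//
//   void recalculate(SizerKind kind, uint heap_regions,
//                    uint* min_length, uint* max_length) {
//     switch (kind) {
//       case SizerDefaults:        // both bounds track the heap size
//         *min_length = percent_of(heap_regions, G1NewSizePercent);
//         *max_length = percent_of(heap_regions, G1MaxNewSizePercent);
//         break;
//       case SizerNewSizeOnly:     // min pinned by NewSize
//         *max_length = percent_of(heap_regions, G1MaxNewSizePercent);
//         break;
//       case SizerMaxNewSizeOnly:  // max pinned by MaxNewSize
//         *min_length = percent_of(heap_regions, G1NewSizePercent);
//         break;
//       case SizerMaxAndNewSize:   // both pinned; no recalculation
//         break;
//       case SizerNewRatio:        // young gen = heap / (NewRatio + 1)
//         *min_length = *max_length = heap_regions / (NewRatio + 1);
//         break;
//     }
//   }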

class G1CollectorPolicy: public CollectorPolicy {
 private:
  G1IHOPControl* _ihop_control;

  G1IHOPControl* create_ihop_control() const;
  // Update the IHOP control with the necessary statistics.
  void update_ihop_prediction(double mutator_time_s,
                              size_t mutator_alloc_bytes,
                              size_t young_gen_size);
  void report_ihop_statistics();

  G1Predictions _predictor;

  double get_new_prediction(TruncatedSeq const* seq) const;

  // Either equal to the number of parallel threads, if ParallelGCThreads
  // has been set, or 1 otherwise.
  int _parallel_gc_threads;

  // The number of GC threads currently active.
  uintx _no_of_gc_threads;

  G1MMUTracker* _mmu_tracker;

  void initialize_alignments();
  void initialize_flags();

  CollectionSetChooser* _cset_chooser;

  double _full_collection_start_sec;

  // These exclude marking times.
  TruncatedSeq* _recent_gc_times_ms;

  TruncatedSeq* _concurrent_mark_remark_times_ms;
  TruncatedSeq* _concurrent_mark_cleanup_times_ms;

  // Ratio check data for determining whether heap growth is necessary.
  uint _ratio_over_threshold_count;
  double _ratio_over_threshold_sum;
  uint _pauses_since_start;

  TraceYoungGenTimeData _trace_young_gen_time_data;
  TraceOldGenTimeData   _trace_old_gen_time_data;

  double _stop_world_start;

  uint _young_list_target_length;
  uint _young_list_fixed_length;

  // The max number of regions we can extend the eden by while the GC
  // locker is active. This should be >= _young_list_target_length.
  uint _young_list_max_length;

  SurvRateGroup* _short_lived_surv_rate_group;
  SurvRateGroup* _survivor_surv_rate_group;
  // Add any more surv rate groups here.

  double _gc_overhead_perc;

  double _reserve_factor;
  uint   _reserve_regions;

  enum PredictionConstants {
    TruncatedSeqLength = 10,
    NumPrevPausesForHeuristics = 10,
    // MinOverThresholdForGrowth must be less than NumPrevPausesForHeuristics.
    // It is the minimum number of pause time ratios that must exceed
    // GCTimeRatio before a heap expansion will be triggered.
    MinOverThresholdForGrowth = 4
  };
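
  // For illustration only, a hedged sketch of how these constants could feed
  // the growth heuristic (see expansion_amount(); not the actual code):
  //
  //   if (_ratio_over_threshold_count >= MinOverThresholdForGrowth &&
  //       _pauses_since_start >= NumPrevPausesForHeuristics) {
  //     // enough recent pauses spent too much time in GC relative to
  //     // GCTimeRatio, so expansion_amount() returns a non-zero byte count
  //   }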

  TruncatedSeq* _alloc_rate_ms_seq;
  double        _prev_collection_pause_end_ms;

  TruncatedSeq* _rs_length_diff_seq;
  TruncatedSeq* _cost_per_card_ms_seq;
  TruncatedSeq* _cost_scan_hcc_seq;
  TruncatedSeq* _young_cards_per_entry_ratio_seq;
  TruncatedSeq* _mixed_cards_per_entry_ratio_seq;
  TruncatedSeq* _cost_per_entry_ms_seq;
  TruncatedSeq* _mixed_cost_per_entry_ms_seq;
  TruncatedSeq* _cost_per_byte_ms_seq;
  TruncatedSeq* _constant_other_time_ms_seq;
  TruncatedSeq* _young_other_cost_per_region_ms_seq;
  TruncatedSeq* _non_young_other_cost_per_region_ms_seq;

  TruncatedSeq* _pending_cards_seq;
  TruncatedSeq* _rs_lengths_seq;

  TruncatedSeq* _cost_per_byte_ms_during_cm_seq;

  G1YoungGenSizer* _young_gen_sizer;

  uint _eden_cset_region_length;
  uint _survivor_cset_region_length;
  uint _old_cset_region_length;

  void init_cset_region_lengths(uint eden_cset_region_length,
                                uint survivor_cset_region_length);

  uint eden_cset_region_length() const     { return _eden_cset_region_length;     }
  uint survivor_cset_region_length() const { return _survivor_cset_region_length; }
  uint old_cset_region_length() const      { return _old_cset_region_length;      }

  uint _free_regions_at_end_of_collection;

  size_t _recorded_rs_lengths;
  size_t _max_rs_lengths;

  size_t _rs_lengths_prediction;

#ifndef PRODUCT
  bool verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_group);
#endif // PRODUCT

  void adjust_concurrent_refinement(double update_rs_time,
                                    double update_rs_processed_buffers,
                                    double goal_ms);

  uintx no_of_gc_threads() { return _no_of_gc_threads; }
  void set_no_of_gc_threads(uintx v) { _no_of_gc_threads = v; }

  double _pause_time_target_ms;

  size_t _pending_cards;

  // The number of bytes allocated in the old gen during the last mutator
  // phase and the following young GC phase.
  size_t _bytes_allocated_in_old_since_last_gc;

  G1InitialMarkToMixedTimeTracker _initial_mark_to_mixed;
public:
  const G1Predictions& predictor() const { return _predictor; }

  // Add the given number of bytes to the total number of allocated bytes in the old gen.
  void add_bytes_allocated_in_old_since_last_gc(size_t bytes) { _bytes_allocated_in_old_since_last_gc += bytes; }

  // Accessors

  void set_region_eden(HeapRegion* hr, int young_index_in_cset) {
    hr->set_eden();
    hr->install_surv_rate_group(_short_lived_surv_rate_group);
    hr->set_young_index_in_cset(young_index_in_cset);
  }

  void set_region_survivor(HeapRegion* hr, int young_index_in_cset) {
    assert(hr->is_survivor(), "pre-condition");
    hr->install_surv_rate_group(_survivor_surv_rate_group);
    hr->set_young_index_in_cset(young_index_in_cset);
  }

#ifndef PRODUCT
  bool verify_young_ages();
#endif // PRODUCT

  void record_max_rs_lengths(size_t rs_lengths) {
    _max_rs_lengths = rs_lengths;
  }

  size_t predict_rs_length_diff() const;

  double predict_alloc_rate_ms() const;

  double predict_cost_per_card_ms() const;

  double predict_scan_hcc_ms() const;

  double predict_rs_update_time_ms(size_t pending_cards) const;

  double predict_young_cards_per_entry_ratio() const;

  double predict_mixed_cards_per_entry_ratio() const;

  size_t predict_young_card_num(size_t rs_length) const;

  size_t predict_non_young_card_num(size_t rs_length) const;

  double predict_rs_scan_time_ms(size_t card_num) const;

  double predict_mixed_rs_scan_time_ms(size_t card_num) const;

  double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) const;

  double predict_object_copy_time_ms(size_t bytes_to_copy) const;

  double predict_constant_other_time_ms() const;

  double predict_young_other_time_ms(size_t young_num) const;

  double predict_non_young_other_time_ms(size_t non_young_num) const;

  double predict_base_elapsed_time_ms(size_t pending_cards) const;
  double predict_base_elapsed_time_ms(size_t pending_cards,
                                      size_t scanned_cards) const;
  size_t predict_bytes_to_copy(HeapRegion* hr) const;
  double predict_region_elapsed_time_ms(HeapRegion* hr, bool for_young_gc) const;
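
  // For intuition only, a hedged sketch (not the actual definition) of how
  // the base pause time prediction composes the pieces above:
  //
  //   predict_base_elapsed_time_ms(pending_cards, scanned_cards) ~=
  //       predict_rs_update_time_ms(pending_cards)   // refine pending cards
  //     + predict_rs_scan_time_ms(scanned_cards)     // scan remembered sets
  //     + predict_constant_other_time_ms();          // fixed per-pause costs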

  void set_recorded_rs_lengths(size_t rs_lengths);

  uint cset_region_length() const       { return young_cset_region_length() +
                                           old_cset_region_length(); }
  uint young_cset_region_length() const { return eden_cset_region_length() +
                                           survivor_cset_region_length(); }

  double predict_survivor_regions_evac_time() const;

  bool should_update_surv_rate_group_predictors() {
    return collector_state()->last_gc_was_young() && !collector_state()->in_marking_window();
  }

  void cset_regions_freed() {
    bool update = should_update_surv_rate_group_predictors();

    _short_lived_surv_rate_group->all_surviving_words_recorded(update);
    _survivor_surv_rate_group->all_surviving_words_recorded(update);
  }

  G1MMUTracker* mmu_tracker() {
    return _mmu_tracker;
  }

  const G1MMUTracker* mmu_tracker() const {
    return _mmu_tracker;
  }

  double max_pause_time_ms() const {
    return _mmu_tracker->max_gc_time() * 1000.0;
  }

  double predict_remark_time_ms() const;

  double predict_cleanup_time_ms() const;

  // Returns an estimate of the survival rate of the region at the given
  // young-gen age, within the given survivor rate group.
  double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) const;

  double predict_yg_surv_rate(int age) const;

  double accum_yg_surv_rate_pred(int age) const;

protected:
  virtual double average_time_ms(G1GCPhaseTimes::GCParPhases phase) const;
  virtual double other_time_ms(double pause_time_ms) const;

  double young_other_time_ms() const;
  double non_young_other_time_ms() const;
  double constant_other_time_ms(double pause_time_ms) const;

  CollectionSetChooser* cset_chooser() const {
    return _cset_chooser;
  }

private:
  // Statistics kept per GC stoppage (pause or full collection).
  TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec;

  // Add a new GC of the given duration and end time to the record.
  void update_recent_gc_times(double end_time_sec, double elapsed_ms);

  // The head of the list (via "next_in_collection_set()") representing the
  // current collection set. Set from the incrementally built collection
  // set at the start of the pause.
  HeapRegion* _collection_set;

  // The number of bytes in the collection set before the pause. Set from
  // the incrementally built collection set at the start of an evacuation
  // pause, and incremented in finalize_old_cset_part() when adding old regions
  // (if any) to the collection set.
  size_t _collection_set_bytes_used_before;

  // The number of bytes copied during the GC.
  size_t _bytes_copied_during_gc;

  // The associated information that is maintained while the incremental
  // collection set is being built with young regions. Used to populate
  // the recorded info for the evacuation pause.

  enum CSetBuildType {
    Active,             // We are actively building the collection set
    Inactive            // We are not actively building the collection set
  };

  CSetBuildType _inc_cset_build_state;

  // The head of the incrementally built collection set.
  HeapRegion* _inc_cset_head;

  // The tail of the incrementally built collection set.
  HeapRegion* _inc_cset_tail;

  // The number of bytes in the incrementally built collection set.
  // Used to set _collection_set_bytes_used_before at the start of
  // an evacuation pause.
  size_t _inc_cset_bytes_used_before;

  // Used to record the highest end of any heap region in the collection set.
  HeapWord* _inc_cset_max_finger;

  // The RSet lengths recorded for regions in the CSet. It is updated
  // by the thread that adds a new region to the CSet. We assume that
  // only one thread can be allocating a new CSet region (currently,
  // it does so after taking the Heap_lock) hence no need to
  // synchronize updates to this field.
  size_t _inc_cset_recorded_rs_lengths;

  // A concurrent refinement thread periodically samples the young
  // region RSets and needs to update _inc_cset_recorded_rs_lengths as
  // the RSets grow. Instead of having to synchronize updates to that
  // field we accumulate them in this field and add it to
  // _inc_cset_recorded_rs_lengths at the start of a GC.
  ssize_t _inc_cset_recorded_rs_lengths_diffs;

  // The predicted elapsed time it will take to collect the regions in
  // the CSet. This is updated by the thread that adds a new region to
  // the CSet. See the comment for _inc_cset_recorded_rs_lengths about
  // MT-safety assumptions.
  double _inc_cset_predicted_elapsed_time_ms;

  // See the comment for _inc_cset_recorded_rs_lengths_diffs.
  double _inc_cset_predicted_elapsed_time_ms_diffs;
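
  // For illustration only, the fold-in that finalize_incremental_cset_building()
  // is expected to perform at the start of a GC (a sketch, not the actual code):
  //
  //   _inc_cset_recorded_rs_lengths       += _inc_cset_recorded_rs_lengths_diffs;
  //   _inc_cset_recorded_rs_lengths_diffs  = 0;
  //   // and analogously for the predicted elapsed time and its diffs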

  // Stash a pointer to the g1 heap.
  G1CollectedHeap* _g1;

  G1GCPhaseTimes* _phase_times;

  // The ratio of gc time to elapsed time, computed over recent pauses,
  // and the ratio for just the last pause.
  double _recent_avg_pause_time_ratio;
  double _last_pause_time_ratio;

  double recent_avg_pause_time_ratio() const {
    return _recent_avg_pause_time_ratio;
  }

  // This set of variables tracks the collector efficiency, in order to
  // determine whether we should initiate a new marking.
  double _mark_remark_start_sec;
  double _mark_cleanup_start_sec;

  // Updates the internal young list maximum and target lengths. Returns the
  // unbounded young list target length.
  uint update_young_list_max_and_target_length();
  uint update_young_list_max_and_target_length(size_t rs_lengths);

  // Update the young list target length either by setting it to the
  // desired fixed value or by calculating it using G1's pause
  // prediction model. If no rs_lengths parameter is passed, predict
  // the RS lengths using the prediction model, otherwise use the
  // given rs_lengths as the prediction.
  // Returns the unbounded young list target length.
  uint update_young_list_target_length(size_t rs_lengths);

  // Calculate and return the minimum desired young list target
  // length. This is the minimum desired young list length according
  // to the user's inputs.
  uint calculate_young_list_desired_min_length(uint base_min_length) const;

  // Calculate and return the maximum desired young list target
  // length. This is the maximum desired young list length according
  // to the user's inputs.
  uint calculate_young_list_desired_max_length() const;

  // Calculate and return the maximum young list target length that
  // can fit into the pause time goal. The parameters are: rs_lengths
  // is the prediction of how large the young RSet lengths will be,
  // base_min_length is the already existing number of regions in
  // the young list, and desired_min_length and desired_max_length are
  // the desired min and max young list lengths according to the user's inputs.
  uint calculate_young_list_target_length(size_t rs_lengths,
                                          uint base_min_length,
                                          uint desired_min_length,
                                          uint desired_max_length) const;

  // Result of the young_list_target_lengths() method, containing both the
  // bounded and the unbounded young list target lengths, in that order.
  typedef Pair<uint, uint, StackObj> YoungTargetLengths;
  YoungTargetLengths young_list_target_lengths(size_t rs_lengths) const;

  void update_rs_lengths_prediction();
  void update_rs_lengths_prediction(size_t prediction);

  // Calculate and return the chunk size (in number of regions) for parallel
  // concurrent mark cleanup.
  uint calculate_parallel_work_chunk_size(uint n_workers, uint n_regions) const;

  // Check whether a given young length (young_length) fits into the
  // given target pause time, and whether the prediction for the amount
  // of objects to be copied for that length will fit into the
  // given free space (expressed by base_free_regions). It is used by
  // calculate_young_list_target_length().
  bool predict_will_fit(uint young_length, double base_time_ms,
                        uint base_free_regions, double target_pause_time_ms) const;
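
  // For illustration only, one plausible shape for the search inside
  // calculate_young_list_target_length(): binary-search the largest young
  // length that still fits (a sketch, not necessarily the real code):
  //
  //   uint lo = desired_min_length;
  //   uint hi = desired_max_length;
  //   while (lo < hi) {
  //     uint mid = lo + (hi - lo + 1) / 2;   // bias up so the loop terminates
  //     if (predict_will_fit(mid, base_time_ms, base_free_regions,
  //                          target_pause_time_ms)) {
  //       lo = mid;        // mid fits; try a longer young list
  //     } else {
  //       hi = mid - 1;    // mid does not fit; try a shorter one
  //     }
  //   }
  //   // lo is now the largest length for which predict_will_fit() holds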

  // Calculate the minimum number of old regions we'll add to the CSet
  // during a mixed GC.
  uint calc_min_old_cset_length() const;

  // Calculate the maximum number of old regions we'll add to the CSet
  // during a mixed GC.
  uint calc_max_old_cset_length() const;

  // Returns the given amount of uncollected reclaimable space
  // as a percentage of the current heap capacity.
  double reclaimable_bytes_perc(size_t reclaimable_bytes) const;

  // Sets up marking if proper conditions are met.
  void maybe_start_marking();

  // The kind of STW pause.
  enum PauseKind {
    FullGC,
    YoungOnlyGC,
    MixedGC,
    LastYoungGC,
    InitialMarkGC,
    Cleanup,
    Remark
  };

  // Calculate PauseKind from internal state.
  PauseKind young_gc_pause_kind() const;
  // Record the given STW pause with the given start and end times (in s).
  void record_pause(PauseKind kind, double start, double end);
  // Indicate that we aborted marking before doing any mixed GCs.
  void abort_time_to_mixed_tracking();
public:

  G1CollectorPolicy();

  virtual ~G1CollectorPolicy();

  virtual G1CollectorPolicy* as_g1_policy() { return this; }

  G1CollectorState* collector_state() const;

  G1GCPhaseTimes* phase_times() const { return _phase_times; }

  // Check the current value of the young list RSet lengths and
  // compare it against the last prediction. If the current value is
  // higher, recalculate the young list target length prediction.
  void revise_young_list_target_length_if_necessary();

  // This should be called after the heap is resized.
  void record_new_heap_size(uint new_number_of_regions);

  void init();

  virtual void note_gc_start(uint num_active_workers);

  // Create jstat counters for the policy.
  virtual void initialize_gc_policy_counters();

  virtual HeapWord* mem_allocate_work(size_t size,
                                      bool is_tlab,
                                      bool* gc_overhead_limit_was_exceeded);

  // This method controls how a collector handles one or more
  // of its generations being fully allocated.
  virtual HeapWord* satisfy_failed_allocation(size_t size,
                                              bool is_tlab);

  bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0);

  bool about_to_start_mixed_phase() const;

  // Record the start and end of an evacuation pause.
  void record_collection_pause_start(double start_time_sec);
  void record_collection_pause_end(double pause_time_ms, size_t cards_scanned);

  // Record the start and end of a full collection.
  void record_full_collection_start();
  void record_full_collection_end();

  // Must currently be called while the world is stopped.
  void record_concurrent_mark_init_end(double mark_init_elapsed_time_ms);

  // Record start and end of remark.
  void record_concurrent_mark_remark_start();
  void record_concurrent_mark_remark_end();

  // Record start, end, and completion of cleanup.
  void record_concurrent_mark_cleanup_start();
  void record_concurrent_mark_cleanup_end();
  void record_concurrent_mark_cleanup_completed();

  // Records the information about the heap size for reporting in
  // print_detailed_heap_transition().
  void record_heap_size_info_at_start(bool full);

  // Print the heap sizing transition.
  void print_detailed_heap_transition() const;

  virtual void print_phases(double pause_time_sec);

  void record_stop_world_start();
  void record_concurrent_pause();

  // Record how much space we copied during a GC. This is typically
  // called when a GC alloc region is being retired.
  void record_bytes_copied_during_gc(size_t bytes) {
    _bytes_copied_during_gc += bytes;
  }

  // The amount of space we copied during a GC.
  size_t bytes_copied_during_gc() const {
    return _bytes_copied_during_gc;
  }

  size_t collection_set_bytes_used_before() const {
    return _collection_set_bytes_used_before;
  }

  // Determine whether there are candidate regions such that the
  // next GC should be mixed. The two action strings are used
  // in the ergo output when the method returns true or false.
  bool next_gc_should_be_mixed(const char* true_action_str,
                               const char* false_action_str) const;

  // Choose a new collection set.  Marks the chosen regions as being
  // "in_collection_set", and links them together.  The head and number of
  // the collection set are available via access methods.
  double finalize_young_cset_part(double target_pause_time_ms);
  virtual void finalize_old_cset_part(double time_remaining_ms);

  // The head of the list (via "next_in_collection_set()") representing the
  // current collection set.
  HeapRegion* collection_set() { return _collection_set; }

  void clear_collection_set() { _collection_set = NULL; }

  // Add old region "hr" to the CSet.
  void add_old_region_to_cset(HeapRegion* hr);

  // Incremental CSet Support

  // The head of the incrementally built collection set.
  HeapRegion* inc_cset_head() { return _inc_cset_head; }

  // The tail of the incrementally built collection set.
  HeapRegion* inc_set_tail() { return _inc_cset_tail; }

  // Initialize incremental collection set info.
  void start_incremental_cset_building();

  // Perform any final calculations on the incremental CSet fields
  // before we can use them.
  void finalize_incremental_cset_building();

  void clear_incremental_cset() {
    _inc_cset_head = NULL;
    _inc_cset_tail = NULL;
  }

  // Stop adding regions to the incremental collection set.
  void stop_incremental_cset_building() { _inc_cset_build_state = Inactive; }

  // Add information about hr to the aggregated information for the
  // incrementally built collection set.
  void add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length);

  // Update information about hr in the aggregated information for
  // the incrementally built collection set.
  void update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length);
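
  // Illustrative call order over one mutator/GC cycle (hypothetical driver,
  // not part of this file):
  //
  //   start_incremental_cset_building();        // after the previous pause
  //   add_region_to_incremental_cset_rhs(hr);   // as eden regions are allocated
  //   finalize_incremental_cset_building();     // at the start of the next pause
  //   stop_incremental_cset_building();         // freeze the incremental cset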

private:
  // Update the incremental cset information when adding a region
  // (should not be called directly).
  void add_region_to_incremental_cset_common(HeapRegion* hr);

  // Set the state to start a concurrent marking cycle and clear
  // _initiate_conc_mark_if_possible because it has now been
  // acted on.
  void initiate_conc_mark();

public:
  // Add hr to the LHS of the incremental collection set.
  void add_region_to_incremental_cset_lhs(HeapRegion* hr);

  // Add hr to the RHS of the incremental collection set.
  void add_region_to_incremental_cset_rhs(HeapRegion* hr);

#ifndef PRODUCT
  void print_collection_set(HeapRegion* list_head, outputStream* st);
#endif // !PRODUCT

  // This sets the initiate_conc_mark_if_possible() flag to start a
  // new cycle, as long as we are not already in one. It's best if it
  // is called during a safepoint, when the test of whether a cycle is
  // in progress or not is stable.
  bool force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause);

  // This is called at the very beginning of an evacuation pause (it
  // has to be the first thing that the pause does). If
  // initiate_conc_mark_if_possible() is true, and the concurrent
  // marking thread has completed its work during the previous cycle,
  // it will set during_initial_mark_pause() to true so that the pause
  // does the initial-mark work and starts a marking cycle.
  void decide_on_conc_mark_initiation();

  // If an expansion would be appropriate, because recent GC overhead has
  // exceeded the desired limit, return an amount to expand by.
  virtual size_t expansion_amount();

  // Clear ratio tracking data used by expansion_amount().
  void clear_ratio_check_data();

  // Print tracing information.
  void print_tracing_info() const;

  // Print stats on the young survival ratio.
  void print_yg_surv_rate_info() const;

  void finished_recalculating_age_indexes(bool is_survivors) {
    if (is_survivors) {
      _survivor_surv_rate_group->finished_recalculating_age_indexes();
    } else {
      _short_lived_surv_rate_group->finished_recalculating_age_indexes();
    }
    // Do the same for any other surv rate groups.
  }

  size_t young_list_target_length() const { return _young_list_target_length; }

  bool is_young_list_full() const;

  bool can_expand_young_list() const;

  uint young_list_max_length() const {
    return _young_list_max_length;
  }

  bool adaptive_young_list_length() const {
    return _young_gen_sizer->adaptive_young_list_length();
  }

  virtual bool should_process_references() const {
    return true;
  }

private:
  //
  // Survivor regions policy.
  //

  // Current tenuring threshold, set to 0 if the collector reaches the
  // maximum amount of survivor regions.
  uint _tenuring_threshold;

  // The limit on the number of regions allocated for survivors.
  uint _max_survivor_regions;

  // For reporting purposes.
  // The value of _heap_used_bytes_before_gc is also used to calculate
  // the cost of copying.

  size_t _eden_used_bytes_before_gc;         // Eden occupancy before GC
  size_t _survivor_used_bytes_before_gc;     // Survivor occupancy before GC
  size_t _old_used_bytes_before_gc;          // Old occupancy before GC
  size_t _humongous_used_bytes_before_gc;    // Humongous occupancy before GC
  size_t _heap_used_bytes_before_gc;         // Heap occupancy before GC
  size_t _metaspace_used_bytes_before_gc;    // Metaspace occupancy before GC

  size_t _eden_capacity_bytes_before_gc;     // Eden capacity before GC
  size_t _heap_capacity_bytes_before_gc;     // Heap capacity before GC

  // The number of survivor regions after a collection.
  uint _recorded_survivor_regions;
  // List of survivor regions.
  HeapRegion* _recorded_survivor_head;
  HeapRegion* _recorded_survivor_tail;

  ageTable _survivors_age_table;

public:
  uint tenuring_threshold() const { return _tenuring_threshold; }

  static const uint REGIONS_UNLIMITED = (uint) -1;

  uint max_regions(InCSetState dest) const {
    switch (dest.value()) {
      case InCSetState::Young:
        return _max_survivor_regions;
      case InCSetState::Old:
        return REGIONS_UNLIMITED;
      default:
        assert(false, "Unknown dest state: " CSETSTATE_FORMAT, dest.value());
        break;
    }
    // Keep some compilers happy.
    return 0;
  }

  void note_start_adding_survivor_regions() {
    _survivor_surv_rate_group->start_adding_regions();
  }

  void note_stop_adding_survivor_regions() {
    _survivor_surv_rate_group->stop_adding_regions();
  }

  void record_survivor_regions(uint regions,
                               HeapRegion* head,
                               HeapRegion* tail) {
    _recorded_survivor_regions = regions;
    _recorded_survivor_head    = head;
    _recorded_survivor_tail    = tail;
  }

  uint recorded_survivor_regions() const {
    return _recorded_survivor_regions;
  }

  void record_age_table(ageTable* age_table) {
    _survivors_age_table.merge(age_table);
  }

  void update_max_gc_locker_expansion();

  // Calculates survivor space parameters.
  void update_survivors_policy();

  virtual void post_heap_initialize();
};

#endif // SHARE_VM_GC_G1_G1COLLECTORPOLICY_HPP