collectorPolicy.hpp revision 8413:92457dfb91bd
/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHARED_COLLECTORPOLICY_HPP
#define SHARE_VM_GC_SHARED_COLLECTORPOLICY_HPP

#include "gc/shared/barrierSet.hpp"
#include "gc/shared/genRemSet.hpp"
#include "gc/shared/generationSpec.hpp"
#include "memory/allocation.hpp"
#include "utilities/macros.hpp"

// This class (or more correctly, subtypes of this class) is used
// to define global garbage collector attributes. This includes
// initialization of generations and any other shared resources
// they may need.
//
// In general, all flag adjustment and validation should be
// done in initialize_flags(), which is called prior to
// initialize_size_info().
//
// This class is not fully developed yet. As more collectors
// are added, it is expected that we will come across further
// behavior that requires global attention. The correct place
// to deal with those issues is this class.

// Forward declarations.
class GenCollectorPolicy;
class AdaptiveSizePolicy;
#if INCLUDE_ALL_GCS
class ConcurrentMarkSweepPolicy;
class G1CollectorPolicy;
#endif // INCLUDE_ALL_GCS

class GCPolicyCounters;
class MarkSweepPolicy;

class CollectorPolicy : public CHeapObj<mtGC> {
 protected:
  GCPolicyCounters* _gc_policy_counters;

  virtual void initialize_alignments() = 0;
  virtual void initialize_flags();
  virtual void initialize_size_info();

  DEBUG_ONLY(virtual void assert_flags();)
  DEBUG_ONLY(virtual void assert_size_info();)

  size_t _initial_heap_byte_size;
  size_t _max_heap_byte_size;
  size_t _min_heap_byte_size;

  size_t _space_alignment;
  size_t _heap_alignment;

  // Records whether MaxHeapSize was set on the command line, since the
  // flag value itself may later be aligned or otherwise adjusted by ergonomics.
  bool _max_heap_size_cmdline;

  // The sizing of the heap is controlled by a sizing policy.
  AdaptiveSizePolicy* _size_policy;

  // Set to true when the policy wants soft refs cleared.
  // Reset to false by the GC after it clears all soft refs.
  bool _should_clear_all_soft_refs;

  // Set to true whenever a GC clears all soft refs, and reset to false
  // each time the GC returns to the mutator.  For example, in the
  // ParallelScavengeHeap case the latter is done toward the end of
  // mem_allocate(), where it returns op.result().
  bool _all_soft_refs_clear;

  CollectorPolicy();

 public:
  virtual void initialize_all() {
    initialize_alignments();
    initialize_flags();
    initialize_size_info();
  }

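  // The three-phase ordering above matters: alignments feed into flag
  // validation, and validated flags feed into the size computations.
  // A minimal sketch of how a subclass plugs into the sequence (the class
  // name and alignment value below are illustrative, not from this file):
  //
  //   class ExamplePolicy : public CollectorPolicy {
  //    protected:
  //     void initialize_alignments() {
  //       _space_alignment = 64 * K;               // fixed before flags are validated
  //       _heap_alignment  = compute_heap_alignment();
  //     }
  //     ...
  //   };
  //
  //   // During heap setup:
  //   ExamplePolicy* policy = new ExamplePolicy();
  //   policy->initialize_all();   // alignments -> flags -> size info
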
  // Return maximum heap alignment that may be imposed by the policy.
  static size_t compute_heap_alignment();

  size_t space_alignment()        { return _space_alignment; }
  size_t heap_alignment()         { return _heap_alignment; }

  size_t initial_heap_byte_size() { return _initial_heap_byte_size; }
  size_t max_heap_byte_size()     { return _max_heap_byte_size; }
  size_t min_heap_byte_size()     { return _min_heap_byte_size; }

  enum Name {
    CollectorPolicyKind,
    GenCollectorPolicyKind,
    ConcurrentMarkSweepPolicyKind,
    G1CollectorPolicyKind
  };

  AdaptiveSizePolicy* size_policy() { return _size_policy; }
  bool should_clear_all_soft_refs() { return _should_clear_all_soft_refs; }
  void set_should_clear_all_soft_refs(bool v) { _should_clear_all_soft_refs = v; }
  // Returns the current value of _should_clear_all_soft_refs.
  // _should_clear_all_soft_refs is set to false as a side effect.
  bool use_should_clear_all_soft_refs(bool v);
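
  // The method above latches and resets the request in one call: per the
  // contract documented on it, the return value is the current request and
  // the flag is cleared as a side effect. A minimal caller sketch
  // (illustrative only; the argument value shown is a placeholder):
  //
  //   bool clear_soft_refs = policy->use_should_clear_all_soft_refs(false);
  //   if (clear_soft_refs) {
  //     // this collection must treat all soft references as clearable
  //   }
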
  bool all_soft_refs_clear() { return _all_soft_refs_clear; }
  void set_all_soft_refs_clear(bool v) { _all_soft_refs_clear = v; }

  // Called by the GC after Soft Refs have been cleared to indicate
  // that the request in _should_clear_all_soft_refs has been fulfilled.
  void cleared_all_soft_refs();

  // Identification methods.
  virtual GenCollectorPolicy*           as_generation_policy()            { return NULL; }
  virtual MarkSweepPolicy*              as_mark_sweep_policy()            { return NULL; }
#if INCLUDE_ALL_GCS
  virtual ConcurrentMarkSweepPolicy*    as_concurrent_mark_sweep_policy() { return NULL; }
  virtual G1CollectorPolicy*            as_g1_policy()                    { return NULL; }
#endif // INCLUDE_ALL_GCS
  // Note that these are not virtual.
  bool is_generation_policy()            { return as_generation_policy() != NULL; }
  bool is_mark_sweep_policy()            { return as_mark_sweep_policy() != NULL; }
#if INCLUDE_ALL_GCS
  bool is_concurrent_mark_sweep_policy() { return as_concurrent_mark_sweep_policy() != NULL; }
  bool is_g1_policy()                    { return as_g1_policy() != NULL; }
#else  // INCLUDE_ALL_GCS
  bool is_concurrent_mark_sweep_policy() { return false; }
  bool is_g1_policy()                    { return false; }
#endif // INCLUDE_ALL_GCS

  virtual BarrierSet::Name barrier_set_name() = 0;

  virtual GenRemSet* create_rem_set(MemRegion reserved);

  // This method controls how a collector satisfies a request
  // for a block of memory.  "gc_overhead_limit_was_exceeded" will
  // be set to true if the adaptive size policy determines that
  // an excessive amount of time is being spent doing collections
  // and this caused a NULL to be returned.  If a NULL is not returned,
  // "gc_overhead_limit_was_exceeded" has an undefined meaning.
  virtual HeapWord* mem_allocate_work(size_t size,
                                      bool is_tlab,
                                      bool* gc_overhead_limit_was_exceeded) = 0;
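
  // A minimal sketch of the caller-side contract described above
  // (the surrounding code is illustrative, not from this file):
  //
  //   bool gc_overhead_limit_was_exceeded = false;
  //   HeapWord* obj = policy->mem_allocate_work(size, false /* is_tlab */,
  //                                             &gc_overhead_limit_was_exceeded);
  //   if (obj == NULL && gc_overhead_limit_was_exceeded) {
  //     // the failure was caused by the GC overhead limit rather than a
  //     // simple lack of space; callers typically report an out-of-memory
  //     // condition in this case
  //   }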

  // This method controls how a collector handles one or more
  // of its generations being fully allocated.
  virtual HeapWord* satisfy_failed_allocation(size_t size, bool is_tlab) = 0;
  // This method controls how a collector handles a metadata allocation
  // failure.
  virtual MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                       size_t size,
                                                       Metaspace::MetadataType mdtype);

  // Performance Counter support
  GCPolicyCounters* counters()     { return _gc_policy_counters; }

  // Create the jstat counters for the GC policy.  By default, policies
  // don't have associated counters, and we complain if this is invoked.
  virtual void initialize_gc_policy_counters() {
    ShouldNotReachHere();
  }

  virtual CollectorPolicy::Name kind() {
    return CollectorPolicy::CollectorPolicyKind;
  }

  // Do any updates required to global flags that are due to heap initialization
  // changes
  virtual void post_heap_initialize() = 0;
};

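// RAII helper: constructed on the stack around a collection. If the
// collection ran with clear_all_soft_refs set, the destructor reports back
// to the policy via cleared_all_soft_refs(). A minimal usage sketch,
// assuming a do_collection()-style routine (illustrative, not from this
// file):
//
//   {
//     ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy);
//     // ... perform the collection; soft refs are cleared iff
//     //     do_clear_all_soft_refs is true ...
//   }  // the destructor notifies the policy if the refs were cleared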
class ClearedAllSoftRefs : public StackObj {
  bool _clear_all_soft_refs;
  CollectorPolicy* _collector_policy;
 public:
  ClearedAllSoftRefs(bool clear_all_soft_refs,
                     CollectorPolicy* collector_policy) :
    _clear_all_soft_refs(clear_all_soft_refs),
    _collector_policy(collector_policy) {}

  ~ClearedAllSoftRefs() {
    if (_clear_all_soft_refs) {
      _collector_policy->cleared_all_soft_refs();
    }
  }
};

class GenCollectorPolicy : public CollectorPolicy {
  friend class TestGenCollectorPolicy;
  friend class VMStructs;
 protected:
  size_t _min_young_size;
  size_t _initial_young_size;
  size_t _max_young_size;
  size_t _min_old_size;
  size_t _initial_old_size;
  size_t _max_old_size;

  // _gen_alignment and _space_alignment will have the same value most of the
  // time. When using large pages they can differ.
  size_t _gen_alignment;

  GenerationSpec* _young_gen_spec;
  GenerationSpec* _old_gen_spec;

  // Return true if an allocation should be attempted in the older generation
  // if it fails in the younger generation.  Otherwise, return false.
  virtual bool should_try_older_generation_allocation(size_t word_size) const;

  void initialize_flags();
  void initialize_size_info();

  DEBUG_ONLY(void assert_flags();)
  DEBUG_ONLY(void assert_size_info();)

  // Try to allocate space by expanding the heap.
  virtual HeapWord* expand_heap_and_allocate(size_t size, bool is_tlab);

  // Compute max heap alignment.
  size_t compute_max_alignment();

  // Scale the base_size by NewRatio according to
  //     result = base_size / (NewRatio + 1)
  // and align by gen_alignment().
  size_t scale_by_NewRatio_aligned(size_t base_size);
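
  // Worked example of the formula above (numbers chosen purely for
  // illustration): with NewRatio = 2 and base_size = 768M, the scaled
  // young size is 768M / (2 + 1) = 256M, which is then aligned to the
  // generation alignment.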

  // Bound the value by the given maximum minus the generation alignment.
  size_t bound_minus_alignment(size_t desired_size, size_t maximum_size);

 public:
  GenCollectorPolicy();

  // Accessors
  size_t min_young_size()     { return _min_young_size; }
  size_t initial_young_size() { return _initial_young_size; }
  size_t max_young_size()     { return _max_young_size; }
  size_t gen_alignment()      { return _gen_alignment; }
  size_t min_old_size()       { return _min_old_size; }
  size_t initial_old_size()   { return _initial_old_size; }
  size_t max_old_size()       { return _max_old_size; }

  int number_of_generations() { return 2; }

  GenerationSpec* young_gen_spec() const {
    assert(_young_gen_spec != NULL, "_young_gen_spec should have been initialized");
    return _young_gen_spec;
  }

  GenerationSpec* old_gen_spec() const {
    assert(_old_gen_spec != NULL, "_old_gen_spec should have been initialized");
    return _old_gen_spec;
  }

  virtual GenCollectorPolicy* as_generation_policy() { return this; }

  virtual void initialize_generations() { }

  virtual void initialize_all() {
    CollectorPolicy::initialize_all();
    initialize_generations();
  }
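
  // The generational variant extends the base sequence with a fourth step:
  // alignments -> flags -> size info -> generations. A minimal sketch of
  // what a concrete subclass's initialize_generations() supplies (the
  // constructor arguments are illustrative placeholders, not exact
  // signatures):
  //
  //   void MyPolicy::initialize_generations() {
  //     _young_gen_spec = new GenerationSpec(/* young gen kind and sizes */);
  //     _old_gen_spec   = new GenerationSpec(/* old gen kind and sizes */);
  //   }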

  size_t young_gen_size_lower_bound();

  HeapWord* mem_allocate_work(size_t size,
                              bool is_tlab,
                              bool* gc_overhead_limit_was_exceeded);

  HeapWord* satisfy_failed_allocation(size_t size, bool is_tlab);

  // Adaptive size policy
  virtual void initialize_size_policy(size_t init_eden_size,
                                      size_t init_promo_size,
                                      size_t init_survivor_size);

  virtual void post_heap_initialize() {
    assert(_max_young_size == MaxNewSize, "Should be taken care of by initialize_size_info");
  }

  BarrierSet::Name barrier_set_name()  { return BarrierSet::CardTableModRef; }

  virtual CollectorPolicy::Name kind() {
    return CollectorPolicy::GenCollectorPolicyKind;
  }
};

class MarkSweepPolicy : public GenCollectorPolicy {
 protected:
  void initialize_alignments();
  void initialize_generations();

 public:
  MarkSweepPolicy() {}

  MarkSweepPolicy* as_mark_sweep_policy() { return this; }

  void initialize_gc_policy_counters();
};

#endif // SHARE_VM_GC_SHARED_COLLECTORPOLICY_HPP