defNewGeneration.hpp revision 13243:7235bc30c0d7
/*
 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SERIAL_DEFNEWGENERATION_HPP
#define SHARE_VM_GC_SERIAL_DEFNEWGENERATION_HPP

#include "gc/serial/cSpaceCounters.hpp"
#include "gc/shared/ageTable.hpp"
#include "gc/shared/copyFailedInfo.hpp"
#include "gc/shared/generation.hpp"
#include "gc/shared/generationCounters.hpp"
#include "gc/shared/preservedMarks.hpp"
#include "utilities/stack.hpp"

class ContiguousSpace;
class ScanClosure;
class STWGCTimer;
class CSpaceCounters;
class ScanWeakRefClosure;

// DefNewGeneration is a young generation containing eden, from- and
// to-space.
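
// A rough sketch of the copying collection this class implements, as a
// reading aid (see collect() and copy_to_survivor_space() for the
// authoritative logic):
//
//   1. Live objects in eden and from-space are copied into to-space, or
//      promoted into the old generation once their age exceeds the
//      tenuring threshold.
//   2. from-space and to-space are then swapped (swap_spaces()), so the
//      survivors sit in the new from-space and eden is left empty.
//   3. If a live object can be neither copied nor promoted, the
//      collection fails and handle_promotion_failure() restores a state
//      that a subsequent full collection can process (see below).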

class DefNewGeneration: public Generation {
  friend class VMStructs;

protected:
  Generation* _old_gen;
  uint        _tenuring_threshold;   // Tenuring threshold for next collection.
  AgeTable    _age_table;
  // Size in words above which objects are pretenured (allocated in the
  // old generation); the command line provides bytes.
  size_t      _pretenure_size_threshold_words;
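  // (For example, -XX:PretenureSizeThreshold=1m stores 1m / HeapWordSize
  // here, i.e. 131072 words on a VM with 8-byte heap words.)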

  AgeTable*   age_table() { return &_age_table; }

  // Initialize state to optimistically assume no promotion failure will
  // happen.
  void   init_assuming_no_promotion_failure();
  // True iff a promotion has failed in the current collection.
  bool   _promotion_failed;
  bool   promotion_failed() { return _promotion_failed; }
  PromotionFailedInfo _promotion_failed_info;

  // Handling promotion failure.  A young generation collection
  // can fail if a live object cannot be copied out of its
  // location in eden or from-space during the collection.  If
  // a collection fails, the young generation is left in a
  // consistent state such that it can be collected by a
  // full collection.
  //   Before the collection
  //     Objects are in eden or from-space
  //     All roots into the young generation point into eden or from-space.
  //
  //   After a failed collection
  //     Objects may be in eden, from-space, or to-space
  //     An object A in eden or from-space may have a copy B
  //       in to-space.  If B exists, all roots that once pointed
  //       to A must now point to B.
  //     All objects in the young generation are unmarked.
  //     Eden, from-space, and to-space will all be collected by
  //       the full collection.
  void handle_promotion_failure(oop);

  // In the absence of promotion failure, we wouldn't look at "from-space"
  // objects after a young-gen collection.  When promotion fails, however,
  // the subsequent full collection will look at from-space objects:
  // therefore we must remove their forwarding pointers.
  void remove_forwarding_pointers();

  // Preserved marks
  PreservedMarksSet _preserved_marks_set;

  // Promotion failure handling
  ExtendedOopClosure* _promo_failure_scan_stack_closure;
  void set_promo_failure_scan_stack_closure(ExtendedOopClosure* scan_stack_closure) {
    _promo_failure_scan_stack_closure = scan_stack_closure;
  }

  Stack<oop, mtGC> _promo_failure_scan_stack;
  void drain_promo_failure_scan_stack(void);
  bool _promo_failure_drain_in_progress;

  // Performance Counters
  GenerationCounters*  _gen_counters;
  CSpaceCounters*      _eden_counters;
  CSpaceCounters*      _from_counters;
  CSpaceCounters*      _to_counters;

  // Sizing information
  size_t               _max_eden_size;
  size_t               _max_survivor_size;

  // Allocation support
  bool _should_allocate_from_space;
  bool should_allocate_from_space() const {
    return _should_allocate_from_space;
  }
  void clear_should_allocate_from_space() {
    _should_allocate_from_space = false;
  }
  void set_should_allocate_from_space() {
    _should_allocate_from_space = true;
  }

  // Tenuring
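  // (A sketch of the policy, based on AgeTable::compute_tenuring_threshold():
  // the desired threshold is the smallest age at which the cumulative size
  // of surviving objects exceeds the desired survivor occupancy, i.e.
  // TargetSurvivorRatio percent of the survivor capacity, capped at
  // MaxTenuringThreshold.)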
  void adjust_desired_tenuring_threshold();

  // Spaces
  ContiguousSpace* _eden_space;
  ContiguousSpace* _from_space;
  ContiguousSpace* _to_space;

  STWGCTimer* _gc_timer;

  enum SomeProtectedConstants {
    // The minimum number of free words to-space must contain for it to be
    // offered as scratch space to an older generation (see
    // contribute_scratch()).
    MinFreeScratchWords = 100
  };

  // Return the size of a survivor space if this generation were of size
  // gen_size.
  size_t compute_survivor_size(size_t gen_size, size_t alignment) const {
    size_t n = gen_size / (SurvivorRatio + 2);
    return n > alignment ? align_down(n, alignment) : alignment;
  }
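  // (Worked example: with the default SurvivorRatio of 8, a 10M young
  // generation yields survivor spaces of 10M / (8 + 2) = 1M each, aligned
  // down; eden receives the remainder.)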

 public:  // was "protected" but caused compile error on win32
  class IsAliveClosure: public BoolObjectClosure {
    Generation* _young_gen;
  public:
    IsAliveClosure(Generation* young_gen);
    bool do_object_b(oop p);
  };

  class KeepAliveClosure: public OopClosure {
  protected:
    ScanWeakRefClosure* _cl;
    CardTableRS* _rs;
    template <class T> void do_oop_work(T* p);
  public:
    KeepAliveClosure(ScanWeakRefClosure* cl);
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
  };

  class FastKeepAliveClosure: public KeepAliveClosure {
  protected:
    HeapWord* _boundary;
    template <class T> void do_oop_work(T* p);
  public:
    FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl);
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
  };

  class EvacuateFollowersClosure: public VoidClosure {
    GenCollectedHeap* _gch;
    ScanClosure* _scan_cur_or_nonheap;
    ScanClosure* _scan_older;
  public:
    EvacuateFollowersClosure(GenCollectedHeap* gch,
                             ScanClosure* cur, ScanClosure* older);
    void do_void();
  };

  class FastEvacuateFollowersClosure: public VoidClosure {
    GenCollectedHeap* _gch;
    DefNewGeneration* _young_gen;
    FastScanClosure* _scan_cur_or_nonheap;
    FastScanClosure* _scan_older;
  public:
    FastEvacuateFollowersClosure(GenCollectedHeap* gch,
                                 FastScanClosure* cur,
                                 FastScanClosure* older);
    void do_void();
  };

 public:
  DefNewGeneration(ReservedSpace rs, size_t initial_byte_size,
                   const char* policy="Copy");

  virtual void ref_processor_init();

  virtual Generation::Name kind() { return Generation::DefNew; }

  // Accessing spaces
  ContiguousSpace* eden() const           { return _eden_space; }
  ContiguousSpace* from() const           { return _from_space; }
  ContiguousSpace* to()   const           { return _to_space;   }

  virtual CompactibleSpace* first_compaction_space() const;

  // Space enquiries
  size_t capacity() const;
  size_t used() const;
  size_t free() const;
  size_t max_capacity() const;
  size_t capacity_before_gc() const;
  size_t unsafe_max_alloc_nogc() const;
  size_t contiguous_available() const;

  size_t max_eden_size() const              { return _max_eden_size; }
  size_t max_survivor_size() const          { return _max_survivor_size; }

  bool supports_inline_contig_alloc() const { return true; }
  HeapWord* volatile* top_addr() const;
  HeapWord** end_addr() const;
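  // (top_addr() and end_addr() expose eden's allocation pointers so that
  // compiled code can inline a bump-the-pointer fast path: load the top,
  // add the request size, check against the end, and install the new top
  // with a compare-and-swap.)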

  // Thread-local allocation buffers
  bool supports_tlab_allocation() const { return true; }
  size_t tlab_capacity() const;
  size_t tlab_used() const;
  size_t unsafe_max_tlab_alloc() const;

  // Grow the generation by the specified number of bytes.
  // The number of bytes is assumed to be properly aligned.
  // Return true if the expansion was successful.
  bool expand(size_t bytes);

  // DefNewGeneration cannot currently expand except at
  // a GC.
  virtual bool is_maximal_no_gc() const { return true; }

  // Iteration
  void object_iterate(ObjectClosure* blk);

  void younger_refs_iterate(OopsInGenClosure* cl, uint n_threads);

  void space_iterate(SpaceClosure* blk, bool usedOnly = false);

  // Allocation support
  virtual bool should_allocate(size_t word_size, bool is_tlab) {
    assert(UseTLAB || !is_tlab, "Should not allocate tlab");

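    // (Note: overflow_limit is the smallest word count whose size in bytes
    // overflows size_t, since overflow_limit * HeapWordSize is exactly
    // 2^BitsPerSize_t bytes.)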
    size_t overflow_limit    = (size_t)1 << (BitsPerSize_t - LogHeapWordSize);

    const bool non_zero      = word_size > 0;
    const bool overflows     = word_size >= overflow_limit;
    const bool check_too_big = _pretenure_size_threshold_words > 0;
    const bool not_too_big   = word_size < _pretenure_size_threshold_words;
    const bool size_ok       = is_tlab || !check_too_big || not_too_big;

    bool result = !overflows &&
                  non_zero   &&
                  size_ok;

    return result;
  }

  HeapWord* allocate(size_t word_size, bool is_tlab);
  HeapWord* allocate_from_space(size_t word_size);

  HeapWord* par_allocate(size_t word_size, bool is_tlab);

  virtual void gc_epilogue(bool full);

  // Save the tops for eden, from, and to
  virtual void record_spaces_top();

  // Accessing marks
  void save_marks();
  void reset_saved_marks();
  bool no_allocs_since_save_marks();
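
  // (save_marks() records the current allocation point of each space; the
  // since-save-marks iterators below visit only objects allocated after
  // that point, which is how newly copied objects get scanned during a
  // scavenge.)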

  // Need to declare the full complement of closures, whether we'll
  // override them or not, or get a message from the compiler:
  //   oop_since_save_marks_iterate_nv hides virtual function...
#define DefNew_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
  void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);

  ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DECL)

#undef DefNew_SINCE_SAVE_MARKS_DECL

  // For a non-youngest collection, the DefNewGeneration can contribute
  // its to-space as scratch space.
  virtual void contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                  size_t max_alloc_words);

  // Reset for contribution of "to-space".
  virtual void reset_scratch();

  // GC support
  virtual void compute_new_size();

  // Returns true if the collection is likely to be safely
  // completed. Even if this method returns true, a collection
  // may not be guaranteed to succeed, and the system should be
  // able to safely unwind and recover from that failure, albeit
  // at some additional cost. Override superclass's implementation.
  virtual bool collection_attempt_is_safe();

  virtual void collect(bool   full,
                       bool   clear_all_soft_refs,
                       size_t size,
                       bool   is_tlab);
  HeapWord* expand_and_allocate(size_t size,
                                bool is_tlab,
                                bool parallel = false);

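  // (A sketch of copy_to_survivor_space(): if old's age is below the
  // tenuring threshold and it fits, it is copied into to-space and its age
  // incremented; otherwise it is promoted into the old generation, and a
  // failed promotion falls back to handle_promotion_failure().)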
  oop copy_to_survivor_space(oop old);
  uint tenuring_threshold() { return _tenuring_threshold; }

  // Performance Counter support
  void update_counters();

  // Printing
  virtual const char* name() const;
  virtual const char* short_name() const { return "DefNew"; }

  void print_on(outputStream* st) const;

  void verify();

  bool promo_failure_scan_is_complete() const {
    return _promo_failure_scan_stack.is_empty();
  }

 protected:
  // If clear_space is true, clear the survivor spaces.  Eden is
  // cleared if the minimum size of eden is 0.  If mangle_space
  // is true, also mangle the space in debug mode.
  void compute_space_boundaries(uintx minimum_eden_size,
                                bool clear_space,
                                bool mangle_space);

  // Return the adjusted new size for NewSizeThreadIncrease.
  // If any overflow happens, revert to the previous new size.
  size_t adjust_for_thread_increase(size_t new_size_candidate,
                                    size_t new_size_before,
                                    size_t alignment) const;

  // Scavenge support
  void swap_spaces();
};

#endif // SHARE_VM_GC_SERIAL_DEFNEWGENERATION_HPP