/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_PARALLEL_PARALLELSCAVENGEHEAP_HPP
#define SHARE_VM_GC_PARALLEL_PARALLELSCAVENGEHEAP_HPP

#include "gc/parallel/generationSizer.hpp"
#include "gc/parallel/objectStartArray.hpp"
#include "gc/parallel/psGCAdaptivePolicyCounters.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/parallel/psYoungGen.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shared/gcWhen.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "utilities/ostream.hpp"

class AdjoiningGenerations;
class GCHeapSummary;
class GCTaskManager;
class PSAdaptiveSizePolicy;
class PSHeapSummary;

class ParallelScavengeHeap : public CollectedHeap {
  friend class VMStructs;
 private:
  static PSYoungGen* _young_gen;
  static PSOldGen*   _old_gen;

  // Sizing policy for entire heap
  static PSAdaptiveSizePolicy*       _size_policy;
  static PSGCAdaptivePolicyCounters* _gc_policy_counters;

  GenerationSizer* _collector_policy;

  // Collection of generations that are adjacent in the
  // space reserved for the heap.
  AdjoiningGenerations* _gens;
  unsigned int _death_march_count;

  // The task manager
  static GCTaskManager* _gc_task_manager;

  void trace_heap(GCWhen::Type when, const GCTracer* tracer);

 protected:
  static inline size_t total_invocations();
  HeapWord* allocate_new_tlab(size_t size);

  inline bool should_alloc_in_eden(size_t size) const;
  inline void death_march_check(HeapWord* const result, size_t size);
  HeapWord* mem_allocate_old_gen(size_t size);

 public:
  ParallelScavengeHeap(GenerationSizer* policy) :
    CollectedHeap(), _collector_policy(policy), _death_march_count(0) { }

  // For use by VM operations
  enum CollectionType {
    Scavenge,
    MarkSweep
  };

  virtual Name kind() const {
    return CollectedHeap::ParallelScavengeHeap;
  }

  virtual CollectorPolicy* collector_policy() const { return (CollectorPolicy*) _collector_policy; }

  static PSYoungGen* young_gen() { return _young_gen; }
  static PSOldGen* old_gen()     { return _old_gen; }

  virtual PSAdaptiveSizePolicy* size_policy() { return _size_policy; }

  static PSGCAdaptivePolicyCounters* gc_policy_counters() { return _gc_policy_counters; }

  static ParallelScavengeHeap* heap();

  static GCTaskManager* const gc_task_manager() { return _gc_task_manager; }

  AdjoiningGenerations* gens() { return _gens; }

  // Returns JNI_OK on success
  virtual jint initialize();

  void post_initialize();
  void update_counters();

  // The alignment used for the various areas
  size_t space_alignment()      { return _collector_policy->space_alignment(); }
  size_t generation_alignment() { return _collector_policy->gen_alignment(); }

  // Return the (conservative) maximum heap alignment
  static size_t conservative_max_heap_alignment() {
    return CollectorPolicy::compute_heap_alignment();
  }

  size_t capacity() const;
  size_t used() const;

  // Return "true" if all generations have reached the
  // maximal committed limit that they can reach, without a garbage
  // collection.
  virtual bool is_maximal_no_gc() const;

  // Return true if the reference points to an object that
  // can be moved in a partial collection.  For currently implemented
  // generational collectors that means during a collection of
  // the young gen.
  virtual bool is_scavengable(const void* addr);

  size_t max_capacity() const;

  // Whether p is in the allocated part of the heap
  bool is_in(const void* p) const;

  bool is_in_reserved(const void* p) const;

  bool is_in_young(oop p);  // reserved part
  bool is_in_old(oop p);    // reserved part

  // Memory allocation.  "gc_overhead_limit_was_exceeded" will
  // be set to true if the adaptive size policy determines that
  // an excessive amount of time is being spent doing collections,
  // causing a NULL to be returned.  If NULL is not returned,
  // "gc_overhead_limit_was_exceeded" has an undefined meaning.
  HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded);
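  //
  // A hedged caller-side sketch (illustrative only; "limit_exceeded" and
  // "word_size" are hypothetical local names):
  //
  //   bool limit_exceeded = false;
  //   HeapWord* mem = heap->mem_allocate(word_size, &limit_exceeded);
  //   if (mem == NULL && limit_exceeded) {
  //     // the adaptive size policy gave up on collecting; callers
  //     // typically surface this as an OutOfMemoryError
  //   }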

  // Allocation attempt(s) during a safepoint. It should never be called
  // to allocate a new TLAB as this allocation might be satisfied out
  // of the old generation.
  HeapWord* failed_mem_allocate(size_t size);

  // Support for System.gc()
  void collect(GCCause::Cause cause);

  // These also should be called by the VM thread at a safepoint (e.g., from a
  // VM operation).
  //
  // invoke_scavenge() collects the young generation only; if the scavenge
  // fails, it will then attempt a full gc.  do_full_collection() (below)
  // collects the entire heap and, when clear_all_soft_refs is true, clears
  // all soft references.
  inline void invoke_scavenge();
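  //
  // A hedged dispatch sketch using the CollectionType enum above (the
  // surrounding VM-operation code is hypothetical):
  //
  //   if (type == ParallelScavengeHeap::Scavenge) {
  //     heap->invoke_scavenge();                       // young gen only
  //   } else {                                         // MarkSweep
  //     heap->do_full_collection(clear_all_soft_refs); // entire heap
  //   }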

  // Perform a full collection
  virtual void do_full_collection(bool clear_all_soft_refs);

  bool supports_inline_contig_alloc() const { return !UseNUMA; }

  HeapWord** top_addr() const { return !UseNUMA ? young_gen()->top_addr() : (HeapWord**)-1; }
  HeapWord** end_addr() const { return !UseNUMA ? young_gen()->end_addr() : (HeapWord**)-1; }
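  //
  // The compilers use top_addr()/end_addr() to inline the allocation fast
  // path.  Conceptually (a hedged sketch, not the emitted code):
  //
  //   HeapWord* obj = *top_addr();
  //   if (obj + size <= *end_addr()) {
  //     *top_addr() = obj + size;     // bump-pointer allocation in eden
  //   }
  //
  // With UseNUMA the young gen is backed by per-node spaces, so no single
  // top/end pair exists; (HeapWord**)-1 is a sentinel disabling the fast path.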

  void ensure_parsability(bool retire_tlabs);
  void accumulate_statistics_all_tlabs();
  void resize_all_tlabs();

  bool supports_tlab_allocation() const { return true; }

  size_t tlab_capacity(Thread* thr) const;
  size_t tlab_used(Thread* thr) const;
  size_t unsafe_max_tlab_alloc(Thread* thr) const;

  // Can a compiler initialize a new object without store barriers?
  // This permission only extends from the creation of a new object
  // via a TLAB up to the first subsequent safepoint.
  virtual bool can_elide_tlab_store_barriers() const {
    return true;
  }

  virtual bool card_mark_must_follow_store() const {
    return false;
  }

  // Return true if we don't need a store barrier for
  // initializing stores to an object at this address.
  virtual bool can_elide_initializing_store_barrier(oop new_obj);

  void object_iterate(ObjectClosure* cl);
  void safe_object_iterate(ObjectClosure* cl) { object_iterate(cl); }

  HeapWord* block_start(const void* addr) const;
  size_t block_size(const HeapWord* addr) const;
  bool block_is_obj(const HeapWord* addr) const;

  jlong millis_since_last_gc();

  void prepare_for_verify();
  PSHeapSummary create_ps_heap_summary();
  virtual void print_on(outputStream* st) const;
  virtual void print_on_error(outputStream* st) const;
  virtual void print_gc_threads_on(outputStream* st) const;
  virtual void gc_threads_do(ThreadClosure* tc) const;
  virtual void print_tracing_info() const;

  void verify(bool silent, VerifyOption option /* ignored */);

  void print_heap_change(size_t prev_used);

  // Resize the young generation.  The reserved space for the
  // generation may be expanded in preparation for the resize.
  void resize_young_gen(size_t eden_size, size_t survivor_size);

  // Resize the old generation.  The reserved space for the
  // generation may be expanded in preparation for the resize.
  void resize_old_gen(size_t desired_free_space);

  // Save the tops of the spaces in all generations
  void record_gen_tops_before_GC() PRODUCT_RETURN;

  // Mangle the unused parts of all spaces in the heap
  void gen_mangle_unused_area() PRODUCT_RETURN;

  // Call these in sequential code around the processing of strong roots.
  class ParStrongRootsScope : public MarkScope {
   public:
    ParStrongRootsScope();
    ~ParStrongRootsScope();
  };
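  //
  // A hedged usage sketch (RAII: construction and destruction bracket the
  // parallel strong-root processing; the body shown is hypothetical):
  //
  //   {
  //     ParallelScavengeHeap::ParStrongRootsScope srs;
  //     // ... create and run strong-root GC tasks ...
  //   } // scope exits once root processing is complete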
};

#endif // SHARE_VM_GC_PARALLEL_PARALLELSCAVENGEHEAP_HPP