/*
 * Copyright (c) 2007, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_CMS_CMSOOPCLOSURES_HPP
#define SHARE_VM_GC_CMS_CMSOOPCLOSURES_HPP

#include "gc/shared/genOopClosures.hpp"
#include "gc/shared/taskqueue.hpp"
#include "memory/iterator.hpp"

/////////////////////////////////////////////////////////////////
// Closures used by ConcurrentMarkSweepGeneration's collector
/////////////////////////////////////////////////////////////////
class ConcurrentMarkSweepGeneration;
class CMSBitMap;
class CMSMarkStack;
class CMSCollector;
class MarkFromRootsClosure;
class ParMarkFromRootsClosure;

// Decode the oop and call do_oop on it.
#define DO_OOP_WORK_DEFN                             \
  void do_oop(oop obj);                              \
  template <class T> inline void do_oop_work(T* p);
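
// Usage sketch: a closure declared with DO_OOP_WORK_DEFN keeps its marking
// logic in the templated do_oop_work(T* p); the virtual do_oop(oop*) and
// do_oop(narrowOop*) entry points it declares are expected to simply
// delegate to it, roughly like this (SomeClosure is a placeholder name;
// the real definitions live in the corresponding .cpp/.inline.hpp files):
//
//   void SomeClosure::do_oop(oop* p)       { do_oop_work(p); }
//   void SomeClosure::do_oop(narrowOop* p) { do_oop_work(p); }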

// TODO: This duplication of the MetadataAwareOopClosure class is only needed
//       because some CMS OopClosures derive from OopsInGenClosure. It would be
//       good to get rid of them completely.
class MetadataAwareOopsInGenClosure: public OopsInGenClosure {
  KlassToOopClosure _klass_closure;
 public:
  MetadataAwareOopsInGenClosure() {
    _klass_closure.initialize(this);
  }

  virtual bool do_metadata()    { return do_metadata_nv(); }
  inline  bool do_metadata_nv() { return true; }

  virtual void do_klass(Klass* k);
  void do_klass_nv(Klass* k);

  virtual void do_cld(ClassLoaderData* cld) { do_cld_nv(cld); }
  void do_cld_nv(ClassLoaderData* cld);
};
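
// Because do_metadata() answers true, oop iterations driven by these closures
// also visit the Klass and ClassLoaderData of the objects they scan, so class
// metadata reachable only through instances stays alive during a CMS cycle.
// See the do_klass_nv/do_cld_nv definitions in the corresponding
// .cpp/.inline.hpp files for the exact behavior.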

class MarkRefsIntoClosure: public MetadataAwareOopsInGenClosure {
 private:
  const MemRegion _span;
  CMSBitMap*      _bitMap;
 protected:
  DO_OOP_WORK_DEFN
 public:
  MarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
};

class ParMarkRefsIntoClosure: public MetadataAwareOopsInGenClosure {
 private:
  const MemRegion _span;
  CMSBitMap*      _bitMap;
 protected:
  DO_OOP_WORK_DEFN
 public:
  ParMarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
};

// A variant of the above used in certain kinds of CMS
// marking verification.
class MarkRefsIntoVerifyClosure: public MetadataAwareOopsInGenClosure {
 private:
  const MemRegion _span;
  CMSBitMap*      _verification_bm;
  CMSBitMap*      _cms_bm;
 protected:
  DO_OOP_WORK_DEFN
 public:
  MarkRefsIntoVerifyClosure(MemRegion span, CMSBitMap* verification_bm,
                            CMSBitMap* cms_bm);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
};

// The non-parallel version (the parallel version appears further below).
class PushAndMarkClosure: public MetadataAwareOopClosure {
 private:
  CMSCollector* _collector;
  MemRegion     _span;
  CMSBitMap*    _bit_map;
  CMSBitMap*    _mod_union_table;
  CMSMarkStack* _mark_stack;
  bool          _concurrent_precleaning;
 protected:
  DO_OOP_WORK_DEFN
 public:
  PushAndMarkClosure(CMSCollector* collector,
                     MemRegion span,
                     ReferenceProcessor* rp,
                     CMSBitMap* bit_map,
                     CMSBitMap* mod_union_table,
                     CMSMarkStack* mark_stack,
                     bool concurrent_precleaning);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p);
  inline void do_oop_nv(narrowOop* p);
};

// In the parallel case, the bit map and the
// reference processor are currently shared. Access to
// these shared mutable structures must use appropriate
// synchronization (for instance, via CAS). The marking stack
// used in the non-parallel case above is here replaced with
// an OopTaskQueue structure to allow efficient work stealing.
class ParPushAndMarkClosure: public MetadataAwareOopClosure {
 private:
  CMSCollector* _collector;
  MemRegion     _span;
  CMSBitMap*    _bit_map;
  OopTaskQueue* _work_queue;
 protected:
  DO_OOP_WORK_DEFN
 public:
  ParPushAndMarkClosure(CMSCollector* collector,
                        MemRegion span,
                        ReferenceProcessor* rp,
                        CMSBitMap* bit_map,
                        OopTaskQueue* work_queue);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p);
  inline void do_oop_nv(narrowOop* p);
};
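
// Rough sketch of how a parallel marking task typically uses such a closure
// together with its OopTaskQueue (par_push_and_mark is a placeholder for a
// closure instance, not a declaration from this header):
//
//   oop obj;
//   while (work_queue->pop_local(obj)) {
//     obj->oop_iterate(&par_push_and_mark);  // scan obj, pushing newly
//   }                                        // marked oops onto the queue
//   // once the local queue is drained, the task attempts to steal work
//   // from the queues of other workers via the shared task queue set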

// The non-parallel version (the parallel version appears further below).
class MarkRefsIntoAndScanClosure: public MetadataAwareOopsInGenClosure {
 private:
  MemRegion          _span;
  CMSBitMap*         _bit_map;
  CMSMarkStack*      _mark_stack;
  PushAndMarkClosure _pushAndMarkClosure;
  CMSCollector*      _collector;
  Mutex*             _freelistLock;
  bool               _yield;
  // Whether closure is being used for concurrent precleaning
  bool               _concurrent_precleaning;
 protected:
  DO_OOP_WORK_DEFN
 public:
  MarkRefsIntoAndScanClosure(MemRegion span,
                             ReferenceProcessor* rp,
                             CMSBitMap* bit_map,
                             CMSBitMap* mod_union_table,
                             CMSMarkStack* mark_stack,
                             CMSCollector* collector,
                             bool should_yield,
                             bool concurrent_precleaning);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p);
  inline void do_oop_nv(narrowOop* p);

  void set_freelistLock(Mutex* m) {
    _freelistLock = m;
  }

 private:
  inline void do_yield_check();
  void do_yield_work();
  bool take_from_overflow_list();
};
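
// The yield protocol, in outline: do_yield_check() is invoked at safe points
// in the scan and, when _yield is set and the concurrent collector has been
// asked to yield, calls do_yield_work(), which is expected to temporarily
// give up the locks held by the scan (such as the free list lock installed
// via set_freelistLock()) before resuming. See the implementation in the
// corresponding .cpp/.inline.hpp files for the authoritative details.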

// In this, the parallel avatar of MarkRefsIntoAndScanClosure, the revisit
// stack and the bitMap are shared, so access needs to be suitably
// synchronized. An OopTaskQueue structure, supporting efficient
// work stealing, replaces a CMSMarkStack for storing grey objects.
class ParMarkRefsIntoAndScanClosure: public MetadataAwareOopsInGenClosure {
 private:
  MemRegion             _span;
  CMSBitMap*            _bit_map;
  OopTaskQueue*         _work_queue;
  const uint            _low_water_mark;
  ParPushAndMarkClosure _parPushAndMarkClosure;
 protected:
  DO_OOP_WORK_DEFN
 public:
  ParMarkRefsIntoAndScanClosure(CMSCollector* collector,
                                MemRegion span,
                                ReferenceProcessor* rp,
                                CMSBitMap* bit_map,
                                OopTaskQueue* work_queue);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p);
  inline void do_oop_nv(narrowOop* p);

  void trim_queue(uint size);
};

// This closure is used during the concurrent marking phase
// following the first checkpoint. Its use is buried in
// the closure MarkFromRootsClosure.
class PushOrMarkClosure: public MetadataAwareOopClosure {
 private:
  CMSCollector*   _collector;
  MemRegion       _span;
  CMSBitMap*      _bitMap;
  CMSMarkStack*   _markStack;
  HeapWord* const _finger;
  MarkFromRootsClosure* const
                  _parent;
 protected:
  DO_OOP_WORK_DEFN
 public:
  PushOrMarkClosure(CMSCollector* cms_collector,
                    MemRegion span,
                    CMSBitMap* bitMap,
                    CMSMarkStack* markStack,
                    HeapWord* finger,
                    MarkFromRootsClosure* parent);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p);
  inline void do_oop_nv(narrowOop* p);

  // Deal with a stack overflow condition
  void handle_stack_overflow(HeapWord* lost);
 private:
  inline void do_yield_check();
};
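
// In outline (the authoritative logic lives in the .cpp/.inline.hpp files):
// the closure marks the referenced object in the bit map; if the object lies
// below the current finger, i.e. the scan has already passed its address, it
// is also pushed on the mark stack so it still gets scanned, while objects
// above the finger are left to be picked up when the finger reaches them.
// handle_stack_overflow() deals with a failed push, e.g. by remembering the
// lost address so that marking can later be restarted from it.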

// A parallel (MT) version of the above.
// This closure is used during the concurrent marking phase
// following the first checkpoint. Its use is buried in
// the closure ParMarkFromRootsClosure.
class ParPushOrMarkClosure: public MetadataAwareOopClosure {
 private:
  CMSCollector*                  _collector;
  MemRegion                      _whole_span;
  MemRegion                      _span;       // local chunk
  CMSBitMap*                     _bit_map;
  OopTaskQueue*                  _work_queue;
  CMSMarkStack*                  _overflow_stack;
  HeapWord*  const               _finger;
  HeapWord* volatile* const      _global_finger_addr;
  ParMarkFromRootsClosure* const _parent;
 protected:
  DO_OOP_WORK_DEFN
 public:
  ParPushOrMarkClosure(CMSCollector* cms_collector,
                       MemRegion span,
                       CMSBitMap* bit_map,
                       OopTaskQueue* work_queue,
                       CMSMarkStack* mark_stack,
                       HeapWord* finger,
                       HeapWord* volatile* global_finger_addr,
                       ParMarkFromRootsClosure* parent);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p);
  inline void do_oop_nv(narrowOop* p);

  // Deal with a stack overflow condition
  void handle_stack_overflow(HeapWord* lost);
 private:
  inline void do_yield_check();
};

// For objects in CMS generation, this closure marks
// given objects (transitively) as being reachable/live.
// This is currently used during the (weak) reference object
// processing phase of the CMS final checkpoint step, as
// well as during the concurrent precleaning of the discovered
// reference lists.
class CMSKeepAliveClosure: public MetadataAwareOopClosure {
 private:
  CMSCollector* _collector;
  const MemRegion _span;
  CMSMarkStack* _mark_stack;
  CMSBitMap*    _bit_map;
  bool          _concurrent_precleaning;
 protected:
  DO_OOP_WORK_DEFN
 public:
  CMSKeepAliveClosure(CMSCollector* collector, MemRegion span,
                      CMSBitMap* bit_map, CMSMarkStack* mark_stack,
                      bool cpc);
  bool    concurrent_precleaning() const { return _concurrent_precleaning; }
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p);
  inline void do_oop_nv(narrowOop* p);
};

class CMSInnerParMarkAndPushClosure: public MetadataAwareOopClosure {
 private:
  CMSCollector* _collector;
  MemRegion     _span;
  OopTaskQueue* _work_queue;
  CMSBitMap*    _bit_map;
 protected:
  DO_OOP_WORK_DEFN
 public:
  CMSInnerParMarkAndPushClosure(CMSCollector* collector,
                                MemRegion span, CMSBitMap* bit_map,
                                OopTaskQueue* work_queue);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p);
  inline void do_oop_nv(narrowOop* p);
};

// A parallel (MT) version of the above, used when
// reference processing is parallel; the only difference
// is in the do_oop method.
class CMSParKeepAliveClosure: public MetadataAwareOopClosure {
 private:
  MemRegion     _span;
  OopTaskQueue* _work_queue;
  CMSBitMap*    _bit_map;
  CMSInnerParMarkAndPushClosure
                _mark_and_push;
  const uint    _low_water_mark;
  void trim_queue(uint max);
 protected:
  DO_OOP_WORK_DEFN
 public:
  CMSParKeepAliveClosure(CMSCollector* collector, MemRegion span,
                         CMSBitMap* bit_map, OopTaskQueue* work_queue);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
};

#endif // SHARE_VM_GC_CMS_CMSOOPCLOSURES_HPP