iterator.hpp revision 6707:a2122d7912ed
/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_ITERATOR_HPP
#define SHARE_VM_MEMORY_ITERATOR_HPP

#include "memory/allocation.hpp"
#include "memory/memRegion.hpp"
#include "utilities/top.hpp"

class CodeBlob;
class nmethod;
class ReferenceProcessor;
class DataLayout;
class KlassClosure;
class ClassLoaderData;

// The following classes are C++ `closures` for iterating over objects, roots and spaces

class Closure : public StackObj { };

// OopClosure is used for iterating through references to Java objects.
class OopClosure : public Closure {
 public:
  virtual void do_oop(oop* o) = 0;
  virtual void do_oop_v(oop* o) { do_oop(o); }
  virtual void do_oop(narrowOop* o) = 0;
  virtual void do_oop_v(narrowOop* o) { do_oop(o); }
};
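
// Example (illustrative sketch; CountingOopClosure is a hypothetical name, not
// a HotSpot class): a minimal concrete OopClosure. Both the oop* and the
// narrowOop* overloads are pure virtual, so both must be implemented even when
// they share the same logic.
//
//   class CountingOopClosure : public OopClosure {
//     size_t _count;
//    public:
//     CountingOopClosure() : _count(0) {}
//     virtual void do_oop(oop* p)       { _count++; }
//     virtual void do_oop(narrowOop* p) { _count++; }
//     size_t count() const              { return _count; }
//   };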

// ExtendedOopClosure adds extra code to be run during oop iterations.
// This is needed by the GC and is extracted to a separate type to not
// pollute the OopClosure interface.
class ExtendedOopClosure : public OopClosure {
 public:
  ReferenceProcessor* _ref_processor;
  ExtendedOopClosure(ReferenceProcessor* rp) : _ref_processor(rp) { }
  ExtendedOopClosure() : OopClosure(), _ref_processor(NULL) { }

  // If the do_metadata functions return "true",
  // we invoke the following when running oop_iterate():
  //
  // 1) do_klass on the header klass pointer.
  // 2) do_klass on the klass pointer in the mirrors.
  // 3) do_class_loader_data on the class loader data in class loaders.
  //
  // The virtual (without suffix) and the non-virtual (with _nv suffix) need
  // to be updated together, or else the devirtualization will break.
  //
  // Providing default implementations of the _nv functions unfortunately
  // removes the compile-time safety, but reduces the clutter for the
  // ExtendedOopClosures that don't need to walk the metadata.
  // Currently, only CMS and G1 need these.

  virtual bool do_metadata() { return do_metadata_nv(); }
  bool do_metadata_v()       { return do_metadata(); }
  bool do_metadata_nv()      { return false; }

  virtual void do_klass(Klass* k)   { do_klass_nv(k); }
  void do_klass_v(Klass* k)         { do_klass(k); }
  void do_klass_nv(Klass* k)        { ShouldNotReachHere(); }

  virtual void do_class_loader_data(ClassLoaderData* cld) { ShouldNotReachHere(); }

  // True iff this closure may be safely applied more than once to an oop
  // location without an intervening "major reset" (like the end of a GC).
  virtual bool idempotent() { return false; }
  virtual bool apply_to_weak_ref_discovered_field() { return false; }
};
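
// Example (illustrative sketch; ExampleMetadataClosure is a hypothetical name):
// an ExtendedOopClosure that does walk metadata. The virtual and the _nv
// variants are kept in sync, as required above, so the devirtualized and the
// virtual call paths behave identically (compare MetadataAwareOopClosure below).
//
//   class ExampleMetadataClosure : public ExtendedOopClosure {
//    public:
//     virtual void do_oop(oop* p)       { /* process reference */ }
//     virtual void do_oop(narrowOop* p) { /* process reference */ }
//
//     bool do_metadata_nv()             { return true; }
//     virtual bool do_metadata()        { return do_metadata_nv(); }
//
//     void do_klass_nv(Klass* k)        { /* process klass */ }
//     virtual void do_klass(Klass* k)   { do_klass_nv(k); }
//
//     virtual void do_class_loader_data(ClassLoaderData* cld) { /* process cld */ }
//   };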

// Wrapper closure only used to implement oop_iterate_no_header().
class NoHeaderExtendedOopClosure : public ExtendedOopClosure {
  OopClosure* _wrapped_closure;
 public:
  NoHeaderExtendedOopClosure(OopClosure* cl) : _wrapped_closure(cl) {}
  // Warning: this calls the virtual version do_oop in the wrapped closure.
  void do_oop_nv(oop* p)       { _wrapped_closure->do_oop(p); }
  void do_oop_nv(narrowOop* p) { _wrapped_closure->do_oop(p); }

  void do_oop(oop* p)          { assert(false, "Only the _nv versions should be used");
                                 _wrapped_closure->do_oop(p); }
  void do_oop(narrowOop* p)    { assert(false, "Only the _nv versions should be used");
                                 _wrapped_closure->do_oop(p); }
};

class KlassClosure : public Closure {
 public:
  virtual void do_klass(Klass* k) = 0;
};

class CLDClosure : public Closure {
 public:
  virtual void do_cld(ClassLoaderData* cld) = 0;
};

class KlassToOopClosure : public KlassClosure {
  friend class MetadataAwareOopClosure;
  friend class MetadataAwareOopsInGenClosure;

  OopClosure* _oop_closure;

  // Used when _oop_closure couldn't be set in an initialization list.
  void initialize(OopClosure* oop_closure) {
    assert(_oop_closure == NULL, "Should only be called once");
    _oop_closure = oop_closure;
  }

 public:
  KlassToOopClosure(OopClosure* oop_closure = NULL) : _oop_closure(oop_closure) {}

  virtual void do_klass(Klass* k);
};

class CLDToOopClosure : public CLDClosure {
  OopClosure*       _oop_closure;
  KlassToOopClosure _klass_closure;
  bool              _must_claim_cld;

 public:
  CLDToOopClosure(OopClosure* oop_closure, bool must_claim_cld = true) :
      _oop_closure(oop_closure),
      _klass_closure(oop_closure),
      _must_claim_cld(must_claim_cld) {}

  void do_cld(ClassLoaderData* cld);
};

class CLDToKlassAndOopClosure : public CLDClosure {
  friend class SharedHeap;
  friend class G1CollectedHeap;
 protected:
  OopClosure*   _oop_closure;
  KlassClosure* _klass_closure;
  bool          _must_claim_cld;
 public:
  CLDToKlassAndOopClosure(KlassClosure* klass_closure,
                          OopClosure* oop_closure,
                          bool must_claim_cld) :
                              _oop_closure(oop_closure),
                              _klass_closure(klass_closure),
                              _must_claim_cld(must_claim_cld) {}
  void do_cld(ClassLoaderData* cld);
};

// The base class for all concurrent marking closures
// that participate in class unloading.
// It's used to proxy through the metadata to the oops defined in them.
class MetadataAwareOopClosure: public ExtendedOopClosure {
  KlassToOopClosure _klass_closure;

 public:
  MetadataAwareOopClosure() : ExtendedOopClosure() {
    _klass_closure.initialize(this);
  }
  MetadataAwareOopClosure(ReferenceProcessor* rp) : ExtendedOopClosure(rp) {
    _klass_closure.initialize(this);
  }

  virtual bool do_metadata()    { return do_metadata_nv(); }
  inline  bool do_metadata_nv() { return true; }

  virtual void do_klass(Klass* k);
  void do_klass_nv(Klass* k);

  virtual void do_class_loader_data(ClassLoaderData* cld);
};

// ObjectClosure is used for iterating through an object space

class ObjectClosure : public Closure {
 public:
  // Called for each object.
  virtual void do_object(oop obj) = 0;
};


class BoolObjectClosure : public Closure {
 public:
  virtual bool do_object_b(oop obj) = 0;
};

// Applies an oop closure to all ref fields in objects iterated over in an
// object iteration.
class ObjectToOopClosure: public ObjectClosure {
  ExtendedOopClosure* _cl;
public:
  void do_object(oop obj);
  ObjectToOopClosure(ExtendedOopClosure* cl) : _cl(cl) {}
};

// A version of ObjectClosure that is expected to be robust
// in the face of possibly uninitialized objects.
class ObjectClosureCareful : public ObjectClosure {
 public:
  virtual size_t do_object_careful_m(oop p, MemRegion mr) = 0;
  virtual size_t do_object_careful(oop p) = 0;
};

// The following are used in CompactibleFreeListSpace and
// ConcurrentMarkSweepGeneration.

// Blk closure (abstract class)
class BlkClosure : public StackObj {
 public:
  virtual size_t do_blk(HeapWord* addr) = 0;
};

// A version of BlkClosure that is expected to be robust
// in the face of possibly uninitialized objects.
class BlkClosureCareful : public BlkClosure {
 public:
  size_t do_blk(HeapWord* addr) {
    guarantee(false, "call do_blk_careful instead");
    return 0;
  }
  virtual size_t do_blk_careful(HeapWord* addr) = 0;
};

// SpaceClosure is used for iterating over spaces

class Space;
class CompactibleSpace;

class SpaceClosure : public StackObj {
 public:
  // Called for each space
  virtual void do_space(Space* s) = 0;
};

class CompactibleSpaceClosure : public StackObj {
 public:
  // Called for each compactible space
  virtual void do_space(CompactibleSpace* s) = 0;
};


// CodeBlobClosure is used for iterating through code blobs
// in the code cache or on thread stacks

class CodeBlobClosure : public Closure {
 public:
  // Called for each code blob.
  virtual void do_code_blob(CodeBlob* cb) = 0;
};

// Applies an oop closure to all ref fields in code blobs
// iterated over in an object iteration.
class CodeBlobToOopClosure : public CodeBlobClosure {
  OopClosure* _cl;
  bool _fix_relocations;
 protected:
  void do_nmethod(nmethod* nm);
 public:
  CodeBlobToOopClosure(OopClosure* cl, bool fix_relocations) : _cl(cl), _fix_relocations(fix_relocations) {}
  virtual void do_code_blob(CodeBlob* cb);

  const static bool FixRelocations = true;
};

class MarkingCodeBlobClosure : public CodeBlobToOopClosure {
 public:
  MarkingCodeBlobClosure(OopClosure* cl, bool fix_relocations) : CodeBlobToOopClosure(cl, fix_relocations) {}
  // Called for each code blob, but at most once per unique blob.
  virtual void do_code_blob(CodeBlob* cb);

  class MarkScope : public StackObj {
  protected:
    bool _active;
  public:
    MarkScope(bool activate = true);
      // = { if (active) nmethod::oops_do_marking_prologue(); }
    ~MarkScope();
      // = { if (active) nmethod::oops_do_marking_epilogue(); }
  };
};
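
// Example (illustrative sketch): MarkScope is a stack-allocated RAII guard,
// so placing one around the scan that applies a MarkingCodeBlobClosure lets
// the nmethod marking prologue/epilogue bracket the whole iteration.
//
//   {
//     MarkingCodeBlobClosure::MarkScope ms;  // oops_do_marking_prologue()
//     // ... apply the MarkingCodeBlobClosure to the code blobs ...
//   }                                        // oops_do_marking_epilogue()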

// MonitorClosure is used for iterating over monitors in the monitors cache

class ObjectMonitor;

class MonitorClosure : public StackObj {
 public:
  // called for each monitor in cache
  virtual void do_monitor(ObjectMonitor* m) = 0;
};

// A closure that is applied without any arguments.
class VoidClosure : public StackObj {
 public:
  // I would have liked to declare this a pure virtual, but that breaks
  // in mysterious ways, for unknown reasons.
  virtual void do_void();
};


// YieldClosure is intended for use by iteration loops
// to incrementalize their work, allowing interleaving
// of an interruptible task so as to allow other
// threads to run (which may not otherwise be able to access
// exclusive resources, for instance). Additionally, the
// closure also allows for aborting an ongoing iteration
// by means of checking the return value from the polling
// call.
class YieldClosure : public StackObj {
 public:
  virtual bool should_return() = 0;
};
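
// Example (illustrative sketch; iterate_with_yield is a hypothetical helper,
// not part of HotSpot): an iteration loop that polls a YieldClosure after each
// block, so a long-running scan can be suspended or aborted between blocks.
//
//   HeapWord* iterate_with_yield(HeapWord* cur, HeapWord* end,
//                                BlkClosure* blk, YieldClosure* yield) {
//     while (cur < end) {
//       cur += blk->do_blk(cur);      // process one block, advance by its size
//       if (yield->should_return()) {
//         return cur;                 // stop here; caller may resume from cur
//       }
//     }
//     return end;
//   }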

// Abstract closure for serializing data (read or write).

class SerializeClosure : public Closure {
public:
  // Return bool indicating whether closure implements read or write.
  virtual bool reading() const = 0;

  // Read/write the void pointer pointed to by p.
  virtual void do_ptr(void** p) = 0;

  // Read/write the region specified.
  virtual void do_region(u_char* start, size_t size) = 0;

  // Check/write the tag.  If reading, then compare the tag against
  // the passed in value and fail if they don't match.  This allows
  // for verification that sections of the serialized data are of the
  // correct length.
  virtual void do_tag(int tag) = 0;
};
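
// Example (illustrative sketch; serialize_entry and the tag value are
// hypothetical): the same routine serves both the writing closure (dumping)
// and the reading closure (restoring), with do_tag() guarding the layout.
//
//   void serialize_entry(SerializeClosure* soc, void** field) {
//     soc->do_tag(0x2c2);   // written when dumping, verified when reading
//     soc->do_ptr(field);   // read or write the pointer itself
//   }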

class SymbolClosure : public StackObj {
 public:
  virtual void do_symbol(Symbol**) = 0;

  // Clear LSB in symbol address; it can be set by CPSlot.
  static Symbol* load_symbol(Symbol** p) {
    return (Symbol*)(intptr_t(*p) & ~1);
  }

  // Store symbol, adjusting new pointer if the original pointer was adjusted
  // (symbol references in constant pool slots have their LSB set to 1).
  static void store_symbol(Symbol** p, Symbol* sym) {
    *p = (Symbol*)(intptr_t(sym) | (intptr_t(*p) & 1));
  }
};


// Helper defines for ExtendedOopClosure

#define if_do_metadata_checked(closure, nv_suffix)       \
  /* Make sure the non-virtual and the virtual versions match. */     \
  assert(closure->do_metadata##nv_suffix() == closure->do_metadata(), \
      "Inconsistency in do_metadata");                                \
  if (closure->do_metadata##nv_suffix())

#define assert_should_ignore_metadata(closure, nv_suffix)                                  \
  assert(!closure->do_metadata##nv_suffix(), "Code to handle metadata is not implemented")

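// Example (illustrative sketch): if_do_metadata_checked is typically expanded
// inside specialized oop_iterate bodies, where closure and nv_suffix are
// parameters of the surrounding specialization macro:
//
//   if_do_metadata_checked(closure, nv_suffix) {
//     closure->do_klass##nv_suffix(obj->klass());
//   }
//
// The assert in the macro verifies that the _nv and the virtual do_metadata
// agree before the (possibly devirtualized) metadata walk is taken.
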
#endif // SHARE_VM_MEMORY_ITERATOR_HPP