iterator.hpp revision 5776:de6a9e811145
/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_ITERATOR_HPP
#define SHARE_VM_MEMORY_ITERATOR_HPP

#include "memory/allocation.hpp"
#include "memory/memRegion.hpp"
#include "runtime/prefetch.hpp"
#include "utilities/top.hpp"

// The following classes are C++ `closures` for iterating over objects, roots and spaces

class CodeBlob;
class nmethod;
class ReferenceProcessor;
class DataLayout;
class KlassClosure;
class ClassLoaderData;

// Closure provides abortability.

class Closure : public StackObj {
 protected:
  bool _abort;
  void set_abort() { _abort = true; }
 public:
  Closure() : _abort(false) {}
  // A subtype can use this mechanism to indicate to some iterator mapping
  // functions that the iteration should cease.
  bool abort() { return _abort; }
  void clear_abort() { _abort = false; }
};
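
// Illustrative sketch (hypothetical, not part of the original header): a
// Closure subtype that uses set_abort() to ask a cooperating iterator to
// stop after a fixed visit budget. The class name and the budget are made up.
class ExampleBudgetedClosure : public Closure {
  int _remaining;
 public:
  ExampleBudgetedClosure(int budget) : _remaining(budget) {}
  // Call once per visited element; iterator mapping functions that check
  // abort() will cease once the budget is exhausted.
  void count_visit() { if (--_remaining <= 0) set_abort(); }
};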

// OopClosure is used for iterating through references to Java objects.

class OopClosure : public Closure {
 public:
  virtual void do_oop(oop* o) = 0;
  virtual void do_oop_v(oop* o) { do_oop(o); }
  virtual void do_oop(narrowOop* o) = 0;
  virtual void do_oop_v(narrowOop* o) { do_oop(o); }
};
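
// Illustrative sketch (hypothetical, not used by the VM): a minimal OopClosure
// that counts the reference locations it is applied to. Both the oop* and the
// narrowOop* overloads must be implemented, since the heap may hold full-width
// or compressed references.
class ExampleOopCountClosure : public OopClosure {
  size_t _count;
 public:
  ExampleOopCountClosure() : _count(0) {}
  virtual void do_oop(oop* p)       { _count++; }
  virtual void do_oop(narrowOop* p) { _count++; }
  size_t count() const { return _count; }
};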

// ExtendedOopClosure adds extra code to be run during oop iterations.
// This is needed by the GC and is extracted to a separate type to not
// pollute the OopClosure interface.
class ExtendedOopClosure : public OopClosure {
 public:
  ReferenceProcessor* _ref_processor;
  ExtendedOopClosure(ReferenceProcessor* rp) : _ref_processor(rp) { }
  ExtendedOopClosure() : OopClosure(), _ref_processor(NULL) { }

  // If the do_metadata functions return "true",
  // we invoke the following when running oop_iterate():
  //
  // 1) do_klass on the header klass pointer.
  // 2) do_klass on the klass pointer in the mirrors.
  // 3) do_class_loader_data on the class loader data in class loaders.
  //
  // The virtual (without suffix) and the non-virtual (with _nv suffix) versions
  // need to be updated together, or else the devirtualization will break.
  //
  // Providing default implementations of the _nv functions unfortunately
  // removes the compile-time safety, but reduces the clutter for the
  // ExtendedOopClosures that don't need to walk the metadata. Currently,
  // only CMS needs these.

  virtual bool do_metadata() { return do_metadata_nv(); }
  bool do_metadata_v()       { return do_metadata(); }
  bool do_metadata_nv()      { return false; }

  virtual void do_klass(Klass* k)   { do_klass_nv(k); }
  void do_klass_v(Klass* k)         { do_klass(k); }
  void do_klass_nv(Klass* k)        { ShouldNotReachHere(); }

  virtual void do_class_loader_data(ClassLoaderData* cld) { ShouldNotReachHere(); }

  // Controls how prefetching is done for invocations of this closure.
  Prefetch::style prefetch_style() { // Note that this is non-virtual.
    return Prefetch::do_none;
  }

  // True iff this closure may be safely applied more than once to an oop
  // location without an intervening "major reset" (like the end of a GC).
  virtual bool idempotent() { return false; }
  virtual bool apply_to_weak_ref_discovered_field() { return false; }
};
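
// Illustrative sketch (hypothetical, not used by the VM) of the pattern the
// comment above describes: a metadata-walking ExtendedOopClosure keeps the
// virtual and _nv versions consistent by having each virtual function forward
// to its _nv counterpart.
class ExampleMetadataAwareOopClosure : public ExtendedOopClosure {
 public:
  virtual void do_oop(oop* p)       { /* process the reference */ }
  virtual void do_oop(narrowOop* p) { /* process the reference */ }

  bool do_metadata_nv()             { return true; }
  virtual bool do_metadata()        { return do_metadata_nv(); }

  void do_klass_nv(Klass* k)        { /* process the klass */ }
  virtual void do_klass(Klass* k)   { do_klass_nv(k); }

  virtual void do_class_loader_data(ClassLoaderData* cld) { /* process the CLD */ }
};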

// Wrapper closure only used to implement oop_iterate_no_header().
class NoHeaderExtendedOopClosure : public ExtendedOopClosure {
  OopClosure* _wrapped_closure;
 public:
  NoHeaderExtendedOopClosure(OopClosure* cl) : _wrapped_closure(cl) {}
  // Warning: this calls the virtual version of do_oop in the wrapped closure.
  void do_oop_nv(oop* p)       { _wrapped_closure->do_oop(p); }
  void do_oop_nv(narrowOop* p) { _wrapped_closure->do_oop(p); }

  void do_oop(oop* p)          { assert(false, "Only the _nv versions should be used");
                                 _wrapped_closure->do_oop(p); }
  void do_oop(narrowOop* p)    { assert(false, "Only the _nv versions should be used");
                                 _wrapped_closure->do_oop(p); }
};

class KlassClosure : public Closure {
 public:
  virtual void do_klass(Klass* k) = 0;
};

class KlassToOopClosure : public KlassClosure {
  OopClosure* _oop_closure;
 public:
  KlassToOopClosure(OopClosure* oop_closure) : _oop_closure(oop_closure) {}
  virtual void do_klass(Klass* k);
};

class CLDToOopClosure {
  OopClosure* _oop_closure;
  KlassToOopClosure _klass_closure;
  bool _must_claim_cld;

 public:
  CLDToOopClosure(OopClosure* oop_closure, bool must_claim_cld = true) :
      _oop_closure(oop_closure),
      _klass_closure(oop_closure),
      _must_claim_cld(must_claim_cld) {}

  void do_cld(ClassLoaderData* cld);
};

// ObjectClosure is used for iterating through an object space

class ObjectClosure : public Closure {
 public:
  // Called for each object.
  virtual void do_object(oop obj) = 0;
};
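
// Illustrative sketch (hypothetical, not used by the VM): an ObjectClosure
// that counts the objects encountered during an object-space iteration.
class ExampleObjectCountClosure : public ObjectClosure {
  size_t _count;
 public:
  ExampleObjectCountClosure() : _count(0) {}
  virtual void do_object(oop obj) { _count++; }
  size_t count() const { return _count; }
};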


class BoolObjectClosure : public Closure {
 public:
  virtual bool do_object_b(oop obj) = 0;
};

// Applies an oop closure to all ref fields in objects iterated over in an
// object iteration.
class ObjectToOopClosure: public ObjectClosure {
  ExtendedOopClosure* _cl;
public:
  void do_object(oop obj);
  ObjectToOopClosure(ExtendedOopClosure* cl) : _cl(cl) {}
};

// A version of ObjectClosure with "memory" (see _previous_address below)
class UpwardsObjectClosure: public BoolObjectClosure {
  HeapWord* _previous_address;
 public:
  UpwardsObjectClosure() : _previous_address(NULL) { }
  void set_previous(HeapWord* addr) { _previous_address = addr; }
  HeapWord* previous()              { return _previous_address; }
  // A return value of "true" can be used by the caller to decide
  // if this object's end should *NOT* be recorded in
  // _previous_address above.
  virtual bool do_object_bm(oop obj, MemRegion mr) = 0;
};
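
// Illustrative sketch (hypothetical caller, not part of this header) of how
// an iterator might drive an UpwardsObjectClosure over a region in increasing
// address order, following the contract described above:
//
//   cl->set_previous(region_start);
//   for (each object obj intersecting mr, in address order) {
//     bool skip_recording = cl->do_object_bm(obj, mr);
//     if (!skip_recording) {
//       cl->set_previous((HeapWord*)obj + obj->size());  // remember obj's end
//     }
//   }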

// A version of ObjectClosure that is expected to be robust
// in the face of possibly uninitialized objects.
class ObjectClosureCareful : public ObjectClosure {
 public:
  virtual size_t do_object_careful_m(oop p, MemRegion mr) = 0;
  virtual size_t do_object_careful(oop p) = 0;
};

// The following are used in CompactibleFreeListSpace and
// ConcurrentMarkSweepGeneration.

// Blk closure (abstract class)
class BlkClosure : public StackObj {
 public:
  virtual size_t do_blk(HeapWord* addr) = 0;
};

// A version of BlkClosure that is expected to be robust
// in the face of possibly uninitialized objects.
class BlkClosureCareful : public BlkClosure {
 public:
  size_t do_blk(HeapWord* addr) {
    guarantee(false, "call do_blk_careful instead");
    return 0;
  }
  virtual size_t do_blk_careful(HeapWord* addr) = 0;
};

// SpaceClosure is used for iterating over spaces

class Space;
class CompactibleSpace;

class SpaceClosure : public StackObj {
 public:
  // Called for each space
  virtual void do_space(Space* s) = 0;
};

class CompactibleSpaceClosure : public StackObj {
 public:
  // Called for each compactible space
  virtual void do_space(CompactibleSpace* s) = 0;
};


// CodeBlobClosure is used for iterating through code blobs
// in the code cache or on thread stacks

class CodeBlobClosure : public Closure {
 public:
  // Called for each code blob.
  virtual void do_code_blob(CodeBlob* cb) = 0;
};


class MarkingCodeBlobClosure : public CodeBlobClosure {
 public:
  // Called for each code blob, but at most once per unique blob.
  virtual void do_newly_marked_nmethod(nmethod* nm) = 0;

  virtual void do_code_blob(CodeBlob* cb);
    // = { if (!nmethod(cb)->test_set_oops_do_mark())  do_newly_marked_nmethod(cb); }

  class MarkScope : public StackObj {
  protected:
    bool _active;
  public:
    MarkScope(bool activate = true);
      // = { if (active) nmethod::oops_do_marking_prologue(); }
    ~MarkScope();
      // = { if (active) nmethod::oops_do_marking_epilogue(); }
  };
};


// Applies an oop closure to all ref fields in code blobs
// iterated over in an object iteration.
class CodeBlobToOopClosure: public MarkingCodeBlobClosure {
  OopClosure* _cl;
  bool _do_marking;
public:
  virtual void do_newly_marked_nmethod(nmethod* cb);
    // = { cb->oops_do(_cl); }
  virtual void do_code_blob(CodeBlob* cb);
    // = { if (_do_marking)  super::do_code_blob(cb); else cb->oops_do(_cl); }
  CodeBlobToOopClosure(OopClosure* cl, bool do_marking)
    : _cl(cl), _do_marking(do_marking) {}
};



// MonitorClosure is used for iterating over monitors in the monitors cache

class ObjectMonitor;

class MonitorClosure : public StackObj {
 public:
  // called for each monitor in cache
  virtual void do_monitor(ObjectMonitor* m) = 0;
};

// A closure that is applied without any arguments.
class VoidClosure : public StackObj {
 public:
  // I would have liked to declare this a pure virtual, but that breaks
  // in mysterious ways, for unknown reasons.
  virtual void do_void();
};


// YieldClosure is intended for use by iteration loops to incrementalize
// their work, allowing interleaving of an interruptible task so that other
// threads may run (threads that might not otherwise be able to access
// exclusive resources, for instance). Additionally, the closure allows an
// ongoing iteration to be aborted, by checking the return value of the
// polling call.
class YieldClosure : public StackObj {
 public:
  virtual bool should_return() = 0;
};
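
// Illustrative sketch (hypothetical loop, not part of this header) of the
// intended polling pattern; the work-unit functions are made up:
//
//   while (has_more_work()) {
//     do_one_unit_of_work();
//     if (yield_closure->should_return()) {
//       return;  // suspend or abandon the iteration so other threads can run
//     }
//   }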

// Abstract closure for serializing data (read or write).

class SerializeClosure : public Closure {
public:
  // Return bool indicating whether closure implements read or write.
  virtual bool reading() const = 0;

  // Read/write the void pointer pointed to by p.
  virtual void do_ptr(void** p) = 0;

  // Read/write the region specified.
  virtual void do_region(u_char* start, size_t size) = 0;

  // Check/write the tag.  If reading, then compare the tag against
  // the passed-in value and fail if they don't match.  This allows
  // for verification that sections of the serialized data are of the
  // correct length.
  virtual void do_tag(int tag) = 0;
};
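
// Illustrative sketch (hypothetical, not part of this header): the same
// routine drives both reading and writing, so the dump and the restore stay
// structurally in sync; do_tag() verifies section boundaries on the read side.
//
//   void SomeTable::serialize(SerializeClosure* soc) {
//     soc->do_tag(0x53544142);                    // arbitrary section marker
//     soc->do_ptr((void**)&_table_base);          // read or write a pointer slot
//     soc->do_region((u_char*)_buf, _buf_size);   // read or write raw bytes
//   }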

class SymbolClosure : public StackObj {
 public:
  virtual void do_symbol(Symbol**) = 0;

  // Clear LSB in symbol address; it can be set by CPSlot.
  static Symbol* load_symbol(Symbol** p) {
    return (Symbol*)(intptr_t(*p) & ~1);
  }

  // Store symbol, adjusting new pointer if the original pointer was adjusted
  // (symbol references in constant pool slots have their LSB set to 1).
  static void store_symbol(Symbol** p, Symbol* sym) {
    *p = (Symbol*)(intptr_t(sym) | (intptr_t(*p) & 1));
  }
};
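
// Illustrative sketch (hypothetical, not used by the VM): a SymbolClosure
// implementation that goes through load_symbol()/store_symbol() so the CPSlot
// tag bit kept in the low-order bit of the stored pointer survives the visit.
class ExampleSymbolVisitClosure : public SymbolClosure {
 public:
  virtual void do_symbol(Symbol** p) {
    Symbol* sym = load_symbol(p);   // untagged Symbol*
    // ... inspect or relocate sym here ...
    store_symbol(p, sym);           // writes sym back, re-applying the tag bit
  }
};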

#endif // SHARE_VM_MEMORY_ITERATOR_HPP