// iterator.hpp revision 3602:da91efe96a93
/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_ITERATOR_HPP
#define SHARE_VM_MEMORY_ITERATOR_HPP

#include "memory/allocation.hpp"
#include "memory/memRegion.hpp"
#include "runtime/prefetch.hpp"
#include "utilities/top.hpp"

// The following classes are C++ `closures` for iterating over objects, roots and spaces

class CodeBlob;
class nmethod;
class ReferenceProcessor;
class DataLayout;
class KlassClosure;
class ClassLoaderData;

// Closure provides abortability.

44class Closure : public StackObj {
45 protected:
46  bool _abort;
47  void set_abort() { _abort = true; }
48 public:
49  Closure() : _abort(false) {}
50  // A subtype can use this mechanism to indicate to some iterator mapping
51  // functions that the iteration should cease.
52  bool abort() { return _abort; }
53  void clear_abort() { _abort = false; }
54};
55
56// OopClosure is used for iterating through references to Java objects.
57
58class OopClosure : public Closure {
59 public:
60  virtual void do_oop(oop* o) = 0;
61  virtual void do_oop_v(oop* o) { do_oop(o); }
62  virtual void do_oop(narrowOop* o) = 0;
63  virtual void do_oop_v(narrowOop* o) { do_oop(o); }
64};
65
66// ExtendedOopClosure adds extra code to be run during oop iterations.
67// This is needed by the GC and is extracted to a separate type to not
68// pollute the OopClosure interface.
69class ExtendedOopClosure : public OopClosure {
70 public:
71  ReferenceProcessor* _ref_processor;
72  ExtendedOopClosure(ReferenceProcessor* rp) : _ref_processor(rp) { }
73  ExtendedOopClosure() : OopClosure(), _ref_processor(NULL) { }
74
75  // If the do_metadata functions return "true",
76  // we invoke the following when running oop_iterate():
77  //
78  // 1) do_klass on the header klass pointer.
79  // 2) do_klass on the klass pointer in the mirrors.
80  // 3) do_class_loader_data on the class loader data in class loaders.
81  //
82  // The virtual (without suffix) and the non-virtual (with _nv suffix) need
83  // to be updated together, or else the devirtualization will break.
84  //
85  // Providing default implementations of the _nv functions unfortunately
86  // removes the compile-time safeness, but reduces the clutter for the
87  // ExtendedOopClosures that don't need to walk the metadata. Currently,
88  // only CMS needs these.
89
90  virtual bool do_metadata() { return do_metadata_nv(); }
91  bool do_metadata_v()       { return do_metadata(); }
92  bool do_metadata_nv()      { return false; }
93
94  virtual void do_klass(Klass* k)   { do_klass_nv(k); }
95  void do_klass_v(Klass* k)         { do_klass(k); }
96  void do_klass_nv(Klass* k)        { ShouldNotReachHere(); }
97
98  virtual void do_class_loader_data(ClassLoaderData* cld) { ShouldNotReachHere(); }
99
100  // Controls how prefetching is done for invocations of this closure.
101  Prefetch::style prefetch_style() { // Note that this is non-virtual.
102    return Prefetch::do_none;
103  }
104
105  // True iff this closure may be safely applied more than once to an oop
106  // location without an intervening "major reset" (like the end of a GC).
107  virtual bool idempotent() { return false; }
108  virtual bool apply_to_weak_ref_discovered_field() { return false; }
109};
110
111// Wrapper closure only used to implement oop_iterate_no_header().
112class NoHeaderExtendedOopClosure : public ExtendedOopClosure {
113  OopClosure* _wrapped_closure;
114 public:
115  NoHeaderExtendedOopClosure(OopClosure* cl) : _wrapped_closure(cl) {}
116  // Warning: this calls the virtual version do_oop in the the wrapped closure.
117  void do_oop_nv(oop* p)       { _wrapped_closure->do_oop(p); }
118  void do_oop_nv(narrowOop* p) { _wrapped_closure->do_oop(p); }
119
120  void do_oop(oop* p)          { assert(false, "Only the _nv versions should be used");
121                                 _wrapped_closure->do_oop(p); }
122  void do_oop(narrowOop* p)    { assert(false, "Only the _nv versions should be used");
123                                 _wrapped_closure->do_oop(p);}
124};
125
126class KlassClosure : public Closure {
127 public:
128  virtual void do_klass(Klass* k) = 0;
129};
130
131// ObjectClosure is used for iterating through an object space
132
133class ObjectClosure : public Closure {
134 public:
135  // Called for each object.
136  virtual void do_object(oop obj) = 0;
137};
138
139
140class BoolObjectClosure : public ObjectClosure {
141 public:
142  virtual bool do_object_b(oop obj) = 0;
143};
144
145// Applies an oop closure to all ref fields in objects iterated over in an
146// object iteration.
147class ObjectToOopClosure: public ObjectClosure {
148  ExtendedOopClosure* _cl;
149public:
150  void do_object(oop obj);
151  ObjectToOopClosure(ExtendedOopClosure* cl) : _cl(cl) {}
152};
153
154// A version of ObjectClosure with "memory" (see _previous_address below)
155class UpwardsObjectClosure: public BoolObjectClosure {
156  HeapWord* _previous_address;
157 public:
158  UpwardsObjectClosure() : _previous_address(NULL) { }
159  void set_previous(HeapWord* addr) { _previous_address = addr; }
160  HeapWord* previous()              { return _previous_address; }
161  // A return value of "true" can be used by the caller to decide
162  // if this object's end should *NOT* be recorded in
163  // _previous_address above.
164  virtual bool do_object_bm(oop obj, MemRegion mr) = 0;
165};
166
167// A version of ObjectClosure that is expected to be robust
168// in the face of possibly uninitialized objects.
169class ObjectClosureCareful : public ObjectClosure {
170 public:
171  virtual size_t do_object_careful_m(oop p, MemRegion mr) = 0;
172  virtual size_t do_object_careful(oop p) = 0;
173};
174
175// The following are used in CompactibleFreeListSpace and
176// ConcurrentMarkSweepGeneration.
177
178// Blk closure (abstract class)
179class BlkClosure : public StackObj {
180 public:
181  virtual size_t do_blk(HeapWord* addr) = 0;
182};
183
184// A version of BlkClosure that is expected to be robust
185// in the face of possibly uninitialized objects.
186class BlkClosureCareful : public BlkClosure {
187 public:
188  size_t do_blk(HeapWord* addr) {
189    guarantee(false, "call do_blk_careful instead");
190    return 0;
191  }
192  virtual size_t do_blk_careful(HeapWord* addr) = 0;
193};
194
195// SpaceClosure is used for iterating over spaces
196
197class Space;
198class CompactibleSpace;
199
200class SpaceClosure : public StackObj {
201 public:
202  // Called for each space
203  virtual void do_space(Space* s) = 0;
204};
205
206class CompactibleSpaceClosure : public StackObj {
207 public:
208  // Called for each compactible space
209  virtual void do_space(CompactibleSpace* s) = 0;
210};
211
212
213// CodeBlobClosure is used for iterating through code blobs
214// in the code cache or on thread stacks
215
216class CodeBlobClosure : public Closure {
217 public:
218  // Called for each code blob.
219  virtual void do_code_blob(CodeBlob* cb) = 0;
220};
221
222
223class MarkingCodeBlobClosure : public CodeBlobClosure {
224 public:
225  // Called for each code blob, but at most once per unique blob.
226  virtual void do_newly_marked_nmethod(nmethod* nm) = 0;
227
228  virtual void do_code_blob(CodeBlob* cb);
229    // = { if (!nmethod(cb)->test_set_oops_do_mark())  do_newly_marked_nmethod(cb); }
230
231  class MarkScope : public StackObj {
232  protected:
233    bool _active;
234  public:
235    MarkScope(bool activate = true);
236      // = { if (active) nmethod::oops_do_marking_prologue(); }
237    ~MarkScope();
238      // = { if (active) nmethod::oops_do_marking_epilogue(); }
239  };
240};
241
242
243// Applies an oop closure to all ref fields in code blobs
244// iterated over in an object iteration.
245class CodeBlobToOopClosure: public MarkingCodeBlobClosure {
246  OopClosure* _cl;
247  bool _do_marking;
248public:
249  virtual void do_newly_marked_nmethod(nmethod* cb);
250    // = { cb->oops_do(_cl); }
251  virtual void do_code_blob(CodeBlob* cb);
252    // = { if (_do_marking)  super::do_code_blob(cb); else cb->oops_do(_cl); }
253  CodeBlobToOopClosure(OopClosure* cl, bool do_marking)
254    : _cl(cl), _do_marking(do_marking) {}
255};
256
257
258
259// MonitorClosure is used for iterating over monitors in the monitors cache
260
261class ObjectMonitor;
262
263class MonitorClosure : public StackObj {
264 public:
265  // called for each monitor in cache
266  virtual void do_monitor(ObjectMonitor* m) = 0;
267};
268
269// A closure that is applied without any arguments.
270class VoidClosure : public StackObj {
271 public:
272  // I would have liked to declare this a pure virtual, but that breaks
273  // in mysterious ways, for unknown reasons.
274  virtual void do_void();
275};
276
277
278// YieldClosure is intended for use by iteration loops
279// to incrementalize their work, allowing interleaving
280// of an interruptable task so as to allow other
281// threads to run (which may not otherwise be able to access
282// exclusive resources, for instance). Additionally, the
283// closure also allows for aborting an ongoing iteration
284// by means of checking the return value from the polling
285// call.
286class YieldClosure : public StackObj {
287  public:
288   virtual bool should_return() = 0;
289};
290
291// Abstract closure for serializing data (read or write).
292
293class SerializeClosure : public Closure {
294public:
295  // Return bool indicating whether closure implements read or write.
296  virtual bool reading() const = 0;
297
298  // Read/write the void pointer pointed to by p.
299  virtual void do_ptr(void** p) = 0;
300
301  // Read/write the region specified.
302  virtual void do_region(u_char* start, size_t size) = 0;
303
304  // Check/write the tag.  If reading, then compare the tag against
305  // the passed in value and fail is they don't match.  This allows
306  // for verification that sections of the serialized data are of the
307  // correct length.
308  virtual void do_tag(int tag) = 0;
309};
310
311class SymbolClosure : public StackObj {
312 public:
313  virtual void do_symbol(Symbol**) = 0;
314
315  // Clear LSB in symbol address; it can be set by CPSlot.
316  static Symbol* load_symbol(Symbol** p) {
317    return (Symbol*)(intptr_t(*p) & ~1);
318  }
319
320  // Store symbol, adjusting new pointer if the original pointer was adjusted
321  // (symbol references in constant pool slots have their LSB set to 1).
322  static void store_symbol(Symbol** p, Symbol* sym) {
323    *p = (Symbol*)(intptr_t(sym) | (intptr_t(*p) & 1));
324  }
325};

#endif // SHARE_VM_MEMORY_ITERATOR_HPP