sparsePRT.hpp revision 8413:92457dfb91bd
/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_G1_SPARSEPRT_HPP
#define SHARE_VM_GC_G1_SPARSEPRT_HPP

#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "memory/allocation.hpp"
#include "runtime/mutex.hpp"
#include "utilities/globalDefinitions.hpp"

// Sparse remembered set for a heap region (the "owning" region).  Maps
// indices of other regions to short sequences of cards in the other region
// that might contain pointers into the owner region.

// These tables only expand while they are accessed in parallel --
// deletions may only be done in single-threaded code.  This makes
// unsynchronized reads/iterations safe, as long as expansions caused by
// insertions only enqueue old versions of a table for later deletion,
// and never delete old versions synchronously.
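
// An illustrative sketch of that rule, written in terms of the RSHashTable
// and SparsePRTEntry classes declared below (not compiled; the real logic is
// SparsePRT::expand() in sparsePRT.cpp, which also maintains the "expanded
// list" declared further down):
#if 0
static RSHashTable* expand_sketch(RSHashTable* old_table) {
  RSHashTable* new_table = new RSHashTable(old_table->capacity() * 2);
  // Re-add the surviving entries to the larger table.
  for (size_t i = 0; i < old_table->capacity(); i++) {
    SparsePRTEntry* e = old_table->entry((int)i);
    if (e->valid_entry()) {
      new_table->add_entry(e);
    }
  }
  // The old table is not deleted here while unsynchronized readers may still
  // reach it; instead the owning SparsePRT is put on the expanded list so
  // that single-threaded code can reclaim it later
  // (cf. add_to_expanded_list() / cleanup_all() below).
  return new_table;
}
#endif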

class SparsePRTEntry: public CHeapObj<mtGC> {
public:
  enum SomePublicConstants {
    NullEntry     = -1,
    UnrollFactor  =  4
  };
private:
  RegionIdx_t _region_ind;
  int         _next_index;
  CardIdx_t   _cards[1];
  // WARNING: Don't put any data members beyond this line.  The card array is,
  // in fact, of variable length; it must always be the last data member.
public:
  // Returns the size of the entry, used for entry allocation.
  static size_t size() { return sizeof(SparsePRTEntry) + sizeof(CardIdx_t) * (cards_num() - 1); }
  // Returns the size of the card array.
  static int cards_num() {
    // The number of cards should be a multiple of 4, because that's our current
    // unrolling factor.
    static const int s = MAX2<int>(G1RSetSparseRegionEntries & ~(UnrollFactor - 1), UnrollFactor);
    return s;
  }
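  // For example (illustrative values only): G1RSetSparseRegionEntries == 14
  // would give 14 & ~(UnrollFactor - 1) == 12 cards, while any value smaller
  // than UnrollFactor is clamped up to UnrollFactor by the MAX2 above.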

  // Set the region_ind to the given value, and delete all cards.
  inline void init(RegionIdx_t region_ind);

  RegionIdx_t r_ind() const { return _region_ind; }
  bool valid_entry() const { return r_ind() >= 0; }
  void set_r_ind(RegionIdx_t rind) { _region_ind = rind; }

  int next_index() const { return _next_index; }
  int* next_index_addr() { return &_next_index; }
  void set_next_index(int ni) { _next_index = ni; }

  // Returns "true" iff the entry contains the given card index.
  inline bool contains_card(CardIdx_t card_index) const;

  // Returns the number of non-NULL card entries.
  inline int num_valid_cards() const;

  // Attempts to add the given card index to the entry.  Returns "added" if
  // the card was added, "found" if the card was already present, and
  // "overflow" if the entry is full and the card could not be added.
  enum AddCardResult {
    overflow,
    found,
    added
  };
  inline AddCardResult add_card(CardIdx_t card_index);

  // Copy the current entry's cards into "cards".
  inline void copy_cards(CardIdx_t* cards) const;
  // Copy the current entry's cards into the "_cards" array of "e".
  inline void copy_cards(SparsePRTEntry* e) const;

  inline CardIdx_t card(int i) const { return _cards[i]; }
};
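
// Because the card array extends past the end of the fixed-size part of
// SparsePRTEntry, entries must be allocated with SparsePRTEntry::size()
// rather than sizeof, and must be addressed with byte arithmetic, as
// RSHashTable::entry() does below.  An illustrative sketch (not compiled;
// the NEW_C_HEAP_ARRAY allocation and the use of init(NullEntry) to mark a
// slot unused are assumptions of this sketch, not a description of the
// .cpp file):
#if 0
static SparsePRTEntry* allocate_entry_block_sketch(size_t capacity) {
  // One contiguous block holding "capacity" variable-length entries.
  char* block = NEW_C_HEAP_ARRAY(char, capacity * SparsePRTEntry::size(), mtGC);
  for (size_t i = 0; i < capacity; i++) {
    SparsePRTEntry* e = (SparsePRTEntry*)(block + i * SparsePRTEntry::size());
    e->init((RegionIdx_t)SparsePRTEntry::NullEntry);  // mark the slot unused
  }
  return (SparsePRTEntry*)block;
}
#endif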


class RSHashTable : public CHeapObj<mtGC> {

  friend class RSHashTableIter;

  enum SomePrivateConstants {
    NullEntry = -1
  };

  size_t _capacity;
  size_t _capacity_mask;
  size_t _occupied_entries;
  size_t _occupied_cards;

  SparsePRTEntry* _entries;
  int* _buckets;
  int  _free_region;
  int  _free_list;

  // Requires that the caller hold a lock preventing parallel modifying
  // operations, and that the table be less than completely full.  If
  // an entry for "region_ind" is already in the table, finds it and
  // returns its address; otherwise allocates, initializes, inserts and
  // returns a new entry for "region_ind".
  SparsePRTEntry* entry_for_region_ind_create(RegionIdx_t region_ind);

  // Returns the index of the next free entry in "_entries".
  int alloc_entry();
  // Declares the entry "fi" to be free.  (It must have already been
  // deleted from any bucket lists.)
  void free_entry(int fi);

public:
  RSHashTable(size_t capacity);
  ~RSHashTable();

  // Attempts to ensure that the given card_index in the given region is in
  // the sparse table.  If successful (because the card was already
  // present, or because it was successfully added) returns "true".
  // Otherwise, returns "false" to indicate that the addition would
  // overflow the entry for the region.  The caller must transfer these
  // entries to a larger-capacity representation.
  bool add_card(RegionIdx_t region_id, CardIdx_t card_index);

  bool get_cards(RegionIdx_t region_id, CardIdx_t* cards);

  bool delete_entry(RegionIdx_t region_id);

  bool contains_card(RegionIdx_t region_id, CardIdx_t card_index) const;

  void add_entry(SparsePRTEntry* e);

  SparsePRTEntry* get_entry(RegionIdx_t region_id) const;

  void clear();

  size_t capacity() const      { return _capacity;       }
  size_t capacity_mask() const { return _capacity_mask;  }
  size_t occupied_entries() const { return _occupied_entries; }
  size_t occupied_cards() const   { return _occupied_cards;   }
  size_t mem_size() const;

  SparsePRTEntry* entry(int i) const { return (SparsePRTEntry*)((char*)_entries + SparsePRTEntry::size() * i); }

  void print();
};
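
// Illustrative sketch only (not compiled): the basic add/query protocol of
// RSHashTable.  The caller is assumed to hold whatever lock serializes
// modifying operations; the region and card values are arbitrary examples,
// and the power-of-two capacity is an inference from _capacity_mask.
#if 0
static void rs_hash_table_usage_sketch() {
  RSHashTable* table = new RSHashTable(16 /* capacity, a power of two */);

  RegionIdx_t from_region = 5;    // example index of a referencing region
  CardIdx_t   card        = 42;   // example card within that region

  if (!table->add_card(from_region, card)) {
    // The per-region entry is full; the caller must move this region's
    // cards to a larger-capacity representation (see SparsePRT below).
  }

  if (table->contains_card(from_region, card)) {
    CardIdx_t* cards = new CardIdx_t[SparsePRTEntry::cards_num()];
    if (table->get_cards(from_region, cards)) {
      // cards[0 .. SparsePRTEntry::cards_num()) now holds the entry's cards.
    }
    delete [] cards;
  }

  delete table;
}
#endif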

// ValueObj because it will be embedded in an HRRS iterator.
class RSHashTableIter VALUE_OBJ_CLASS_SPEC {
  int _tbl_ind;         // [-1, 0.._rsht->_capacity)
  int _bl_ind;          // [-1, 0.._rsht->_capacity)
  short _card_ind;      // [0..SparsePRTEntry::cards_num())
  RSHashTable* _rsht;

  // If the bucket list pointed to by _bl_ind contains a card, sets
  // _bl_ind to the index of that entry, and returns the card.
  // Otherwise, returns SparsePRTEntry::NullEntry.
  CardIdx_t find_first_card_in_list();

  // Computes the proper card index for the card whose offset in the
  // current region (as indicated by _bl_ind) is "ci".
  // This is subject to errors when there is iteration concurrent with
  // modification, but these errors should be benign.
  size_t compute_card_ind(CardIdx_t ci);

public:
  RSHashTableIter(RSHashTable* rsht) :
    _tbl_ind(RSHashTable::NullEntry), // So that first increment gets to 0.
    _bl_ind(RSHashTable::NullEntry),
    _card_ind((SparsePRTEntry::cards_num() - 1)),
    _rsht(rsht) {}

  bool has_next(size_t& card_index);
};
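
// Illustrative sketch only (not compiled): the out-parameter iteration
// pattern of RSHashTableIter.  Each call to has_next() advances the iterator
// and, when it returns true, stores the next card's index in "card_index";
// how that index encodes (region, card) is an implementation detail of
// compute_card_ind() and is not assumed here.
#if 0
static size_t count_cards_sketch(RSHashTable* table) {
  RSHashTableIter iter(table);
  size_t card_index = 0;
  size_t count = 0;
  while (iter.has_next(card_index)) {
    count++;        // "card_index" now holds the next card index
  }
  return count;
}
#endif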

// Concurrent access to a SparsePRT must be serialized by some external mutex.

class SparsePRTIter;
class SparsePRTCleanupTask;

class SparsePRT VALUE_OBJ_CLASS_SPEC {
  friend class SparsePRTCleanupTask;

  //  Iterations are done on the _cur hash table, since they only need to
  //  see entries visible at the start of a collection pause.
  //  All other operations are done using the _next hash table.
  RSHashTable* _cur;
  RSHashTable* _next;

  HeapRegion* _hr;

  enum SomeAdditionalPrivateConstants {
    InitialCapacity = 16
  };

  void expand();

  bool _expanded;

  bool expanded() { return _expanded; }
  void set_expanded(bool b) { _expanded = b; }

  SparsePRT* _next_expanded;

  SparsePRT* next_expanded() { return _next_expanded; }
  void set_next_expanded(SparsePRT* nxt) { _next_expanded = nxt; }

  bool should_be_on_expanded_list();

  static SparsePRT* _head_expanded_list;

public:
  SparsePRT(HeapRegion* hr);

  ~SparsePRT();

  size_t occupied() const { return _next->occupied_cards(); }
  size_t mem_size() const;

  // Attempts to ensure that the given card_index in the given region is in
  // the sparse table.  If successful (because the card was already
  // present, or because it was successfully added) returns "true".
  // Otherwise, returns "false" to indicate that the addition would
  // overflow the entry for the region.  The caller must transfer these
  // entries to a larger-capacity representation.
  bool add_card(RegionIdx_t region_id, CardIdx_t card_index);

  // If the table holds an entry for "region_ind", copies its
  // cards into "cards", which must be an array of length at least
  // "SparsePRTEntry::cards_num()", and returns "true"; otherwise,
  // returns "false".
  bool get_cards(RegionIdx_t region_ind, CardIdx_t* cards);

  // Return the pointer to the entry associated with the given region.
  SparsePRTEntry* get_entry(RegionIdx_t region_ind);

  // If there is an entry for "region_ind", removes it and returns "true";
  // otherwise returns "false."
  bool delete_entry(RegionIdx_t region_ind);

  // Clear the table, and reinitialize to initial capacity.
  void clear();

  // Ensure that "_cur" and "_next" point to the same table.
  void cleanup();

  // Clean up all tables on the expanded list.  Called single threaded.
  static void cleanup_all();
  RSHashTable* cur() const { return _cur; }

  static void add_to_expanded_list(SparsePRT* sprt);
  static SparsePRT* get_from_expanded_list();

  // The purpose of these three methods is to help the GC workers
  // during the cleanup pause to recreate the expanded list, purging
  // any tables from it that belong to regions that are freed during
  // cleanup (if we don't purge those tables, there is a race that
  // causes various crashes; see CR 7014261).
  //
  // We chose to recreate the expanded list, instead of purging
  // entries from it by iterating over it, to avoid a serial purging
  // phase at the end of the cleanup pause.
  //
  // The three methods below work as follows:
  // * reset_for_cleanup_tasks() : Nulls the expanded list head at the
  //   start of the cleanup pause.
  // * do_cleanup_work() : Called by the cleanup workers for every
  //   region that is not free / is being freed by the cleanup
  //   pause. It creates a list of expanded tables whose head / tail
  //   are on the thread-local SparsePRTCleanupTask object.
  // * finish_cleanup_task() : Called by the cleanup workers after
  //   they complete their cleanup task. It adds the local list into
  //   the global expanded list. It assumes that the
  //   ParGCRareEvent_lock is being held to ensure MT-safety.
  // (An illustrative sketch of this protocol follows the
  //  SparsePRTCleanupTask class at the end of this file.)
  static void reset_for_cleanup_tasks();
  void do_cleanup_work(SparsePRTCleanupTask* sprt_cleanup_task);
  static void finish_cleanup_task(SparsePRTCleanupTask* sprt_cleanup_task);

  bool contains_card(RegionIdx_t region_id, CardIdx_t card_index) const {
    return _next->contains_card(region_id, card_index);
  }
};
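
// Illustrative sketch only (not compiled): how a caller is expected to react
// when SparsePRT::add_card() reports overflow.  "coarsen_to_fine_grained()"
// is a hypothetical stand-in for the owning remembered set's transfer of the
// region's cards to a larger-capacity representation (in G1 that transfer is
// managed by the code that owns the SparsePRT, not by SparsePRT itself).
#if 0
static void coarsen_to_fine_grained(RegionIdx_t from_region, CardIdx_t* cards,
                                    int num_cards,
                                    CardIdx_t overflow_card);  // hypothetical

static void record_reference_sketch(SparsePRT* sprt,
                                    RegionIdx_t from_region,
                                    CardIdx_t   card) {
  if (!sprt->add_card(from_region, card)) {
    // The sparse entry for "from_region" is full.  Copy its cards out
    // (unused slots hold SparsePRTEntry::NullEntry), hand them -- and the
    // card that did not fit -- to the coarser table, then drop the entry.
    CardIdx_t* cards = new CardIdx_t[SparsePRTEntry::cards_num()];
    if (sprt->get_cards(from_region, cards)) {
      coarsen_to_fine_grained(from_region, cards,
                              SparsePRTEntry::cards_num(), card);
      sprt->delete_entry(from_region);
    }
    delete [] cards;
  }
}
#endif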

class SparsePRTIter: public RSHashTableIter {
public:
  SparsePRTIter(const SparsePRT* sprt) :
    RSHashTableIter(sprt->cur()) {}

  bool has_next(size_t& card_index) {
    return RSHashTableIter::has_next(card_index);
  }
};

// This class allows each worker during a cleanup pause to build a
// thread-local list of sparse tables that have been expanded and need
// to be processed at the beginning of the next GC pause.  These lists
// are concatenated into the single expanded list at the end of the
// cleanup pause.
class SparsePRTCleanupTask VALUE_OBJ_CLASS_SPEC {
private:
  SparsePRT* _head;
  SparsePRT* _tail;

public:
  SparsePRTCleanupTask() : _head(NULL), _tail(NULL) { }

  void add(SparsePRT* sprt);
  SparsePRT* head() { return _head; }
  SparsePRT* tail() { return _tail; }
};
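
// Illustrative sketch only (not compiled): the three-step cleanup protocol
// described in SparsePRT above, from a single worker's point of view.
// SparsePRT::reset_for_cleanup_tasks() is assumed to have been called once
// before the workers start; "regions handled by this worker" and the
// sparse_prt_for() accessor are hypothetical placeholders for the real G1
// cleanup code that drives these calls.
#if 0
static void cleanup_worker_sketch(HeapRegion** regions, int num_regions) {
  SparsePRTCleanupTask local_task;   // thread-local head/tail list

  for (int i = 0; i < num_regions; i++) {
    // Only regions that are not free / not being freed are processed.
    SparsePRT* sprt = sparse_prt_for(regions[i]);       // hypothetical
    sprt->do_cleanup_work(&local_task);  // links sprt if it was expanded
  }

  // Splice the local list into the global expanded list under the lock
  // that finish_cleanup_task() expects to be held.
  MutexLockerEx x(ParGCRareEvent_lock);
  SparsePRT::finish_cleanup_task(&local_task);
}
#endif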

#endif // SHARE_VM_GC_G1_SPARSEPRT_HPP