/*
 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_G1_G1BLOCKOFFSETTABLE_HPP
#define SHARE_VM_GC_G1_G1BLOCKOFFSETTABLE_HPP

#include "gc/g1/g1RegionToSpaceMapper.hpp"
#include "gc/shared/blockOffsetTable.hpp"
#include "memory/memRegion.hpp"
#include "memory/virtualspace.hpp"
#include "utilities/globalDefinitions.hpp"

// Forward declarations
class G1BlockOffsetTable;
class G1ContiguousSpace;

// This implementation of "G1BlockOffsetTable" divides the covered region
// into "N"-word subregions (where "N" = 2^"LogN").  An array with an entry
// for each such subregion indicates how far back one must go to find the
// start of the chunk that includes the first word of the subregion.
//
// Each G1BlockOffsetTablePart is owned by a G1ContiguousSpace.
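//
// For illustration only (assuming the usual BOTConstants values, LogN = 9,
// so each subregion, or "card", covers 512 bytes, i.e. 64 heap words on a
// 64-bit VM): an address "p" falls into the entry
//   index = (p - _reserved.start()) >> BOTConstants::LogN_words
// and that entry records, either directly in words or via a back-skip
// encoding, how far back the start of the block covering the first word of
// that card lies.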

class G1BlockOffsetTable: public CHeapObj<mtGC> {
  friend class G1BlockOffsetTablePart;
  friend class VMStructs;

private:
  // The reserved region covered by the table.
  MemRegion _reserved;

  // Array for keeping offsets for retrieving object start fast given an
  // address.
  u_char* _offset_array;          // byte array keeping backwards offsets

  void check_offset(size_t offset, const char* msg) const {
    assert(offset <= BOTConstants::N_words,
           "%s - offset: " SIZE_FORMAT ", N_words: %u",
           msg, offset, BOTConstants::N_words);
  }

  // Bounds checking accessors:
  // For performance these have to devolve to array accesses in product builds.
  inline u_char offset_array(size_t index) const;

  void set_offset_array_raw(size_t index, u_char offset) {
    _offset_array[index] = offset;
  }

  inline void set_offset_array(size_t index, u_char offset);

  inline void set_offset_array(size_t index, HeapWord* high, HeapWord* low);

  inline void set_offset_array(size_t left, size_t right, u_char offset);

  bool is_card_boundary(HeapWord* p) const;

  void check_index(size_t index, const char* msg) const NOT_DEBUG_RETURN;

public:

  // Return the number of slots needed for an offset array
  // that covers mem_region_words words.
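  // As an illustrative example (with BOTConstants::N_words == 64, the usual
  // value on a 64-bit VM): covering a 1 MB region means 131072 words, i.e.
  // 131072 / 64 = 2048 slots, which allocation_align_size_up() then rounds
  // up to the reservation granularity.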
  static size_t compute_size(size_t mem_region_words) {
    size_t number_of_slots = (mem_region_words / BOTConstants::N_words);
    return ReservedSpace::allocation_align_size_up(number_of_slots);
  }

  // Returns how many bytes of the heap a single byte of the BOT corresponds to.
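  // (With the usual BOTConstants::LogN of 9 this is 512 bytes of heap per
  // BOT byte.)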
  static size_t heap_map_factor() {
    return BOTConstants::N_bytes;
  }

  // Initialize the Block Offset Table to cover the memory region passed
  // in the heap parameter.
  G1BlockOffsetTable(MemRegion heap, G1RegionToSpaceMapper* storage);

  // Return the appropriate index into "_offset_array" for "p".
  inline size_t index_for(const void* p) const;
  inline size_t index_for_raw(const void* p) const;

  // Return the address indicating the start of the region corresponding to
  // "index" in "_offset_array".
  inline HeapWord* address_for_index(size_t index) const;
  // Variant of address_for_index that does not check the index for validity.
  inline HeapWord* address_for_index_raw(size_t index) const {
    return _reserved.start() + (index << BOTConstants::LogN_words);
  }
};

class G1BlockOffsetTablePart VALUE_OBJ_CLASS_SPEC {
  friend class G1BlockOffsetTable;
  friend class VMStructs;
private:
  // allocation boundary at which offset array must be updated
  HeapWord* _next_offset_threshold;
  size_t    _next_offset_index;      // index corresponding to that boundary

  // This is the global BlockOffsetTable.
  G1BlockOffsetTable* _bot;

  // The space that owns this subregion.
  G1ContiguousSpace* _space;

  // Sets the entries
  // corresponding to the cards starting at "start" and ending at "end"
  // to point back to the card before "start": the interval [start, end)
  // is right-open.
  void set_remainder_to_point_to_start(HeapWord* start, HeapWord* end);
  // Same as above, except that the args here are a card _index_ interval
  // that is closed: [start_index, end_index]
  void set_remainder_to_point_to_start_incl(size_t start, size_t end);
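  // (In both of the above, entries for cards far from "start" do not hold a
  // plain word offset; as in the shared block offset table code they use the
  // logarithmic back-skip encoding defined by BOTConstants.)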

  // Zero out the entry for _bottom (offset will be zero). Does not check for availability of the
  // memory first.
  void zero_bottom_entry_raw();
  // Variant of initialize_threshold that does not check for availability of the
  // memory first.
  HeapWord* initialize_threshold_raw();

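  // Returns the size, in HeapWords, of the block starting at "p", as reported
  // by the owning space.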
  inline size_t block_size(const HeapWord* p) const;

  // Returns the address of a block whose start is at most "addr".
  // If "has_max_index" is true, assumes "max_index" is the last valid one
  // in the array.
  inline HeapWord* block_at_or_preceding(const void* addr,
                                         bool has_max_index,
                                         size_t max_index) const;

  // "q" is a block boundary that is <= "addr"; "n" is the address of the
  // next block (or the end of the space.)  Return the address of the
  // beginning of the block that contains "addr".  Does so without side
  // effects (see, e.g., spec of block_start.)
  inline HeapWord* forward_to_block_containing_addr_const(HeapWord* q, HeapWord* n,
                                                          const void* addr) const;

  // "q" is a block boundary that is <= "addr"; return the address of the
  // beginning of the block that contains "addr".  May have side effects
  // on "this", by updating imprecise entries.
  inline HeapWord* forward_to_block_containing_addr(HeapWord* q,
                                                    const void* addr);

  // "q" is a block boundary that is <= "addr"; "n" is the address of the
  // next block (or the end of the space.)  Return the address of the
  // beginning of the block that contains "addr".  May have side effects
  // on "this", by updating imprecise entries.
  HeapWord* forward_to_block_containing_addr_slow(HeapWord* q,
                                                  HeapWord* n,
                                                  const void* addr);

  // Requires that "*threshold_" be the first array entry boundary at or
  // above "blk_start", and that "*index_" be the corresponding array
  // index.  If the block starts at or crosses "*threshold_", records
  // "blk_start" as the appropriate block start for the array index
  // starting at "*threshold_", and for any other indices crossed by the
  // block.  Updates "*threshold_" and "*index_" to correspond to the first
  // index after the block end.
  void alloc_block_work(HeapWord** threshold_, size_t* index_,
                        HeapWord* blk_start, HeapWord* blk_end);

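  // Debug helper: checks that the entries for the cards in the closed
  // interval [left_card, right_card] are consistent.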
  void check_all_cards(size_t left_card, size_t right_card) const;

public:
  //  The elements of the array are initialized to zero.
  G1BlockOffsetTablePart(G1BlockOffsetTable* array, G1ContiguousSpace* gsp);

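  // Verifies that the BOT entries for this part are consistent with the
  // objects currently in the owning space.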
  void verify() const;

  // Returns the address of the start of the block containing "addr", or
  // else "null" if it is covered by no block.  (May have side effects,
  // namely updating of shared array entries that "point" too far
  // backwards.  This can occur, for example, when lab allocation is used
  // in a space covered by the table.)
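  // A typical (illustrative) use: a card scan that holds an address somewhere
  // inside a card calls block_start(addr) and walks objects from the returned
  // address.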
  inline HeapWord* block_start(const void* addr);
  // Same as above, but does not have any of the possible side effects
  // discussed above.
  inline HeapWord* block_start_const(const void* addr) const;

  // Initialize the threshold to reflect the first boundary after the
  // bottom of the covered region.
  HeapWord* initialize_threshold();

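  // Reset the BOT for an empty space: zero the entry for bottom and recompute
  // the initial threshold.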
  void reset_bot() {
    zero_bottom_entry_raw();
    initialize_threshold_raw();
  }

  // Return the next threshold, the point at which the table should be
  // updated.
  HeapWord* threshold() const { return _next_offset_threshold; }

  // These must be guaranteed to work properly (i.e., do nothing)
  // when "blk_start" ("blk" for second version) is "NULL".  In this
  // implementation, that's true because NULL is represented as 0, and thus
  // never exceeds the "_next_offset_threshold".
  void alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
    if (blk_end > _next_offset_threshold) {
      alloc_block_work(&_next_offset_threshold, &_next_offset_index, blk_start, blk_end);
    }
  }
  void alloc_block(HeapWord* blk, size_t size) {
    alloc_block(blk, blk+size);
  }

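  // Update the BOT for a humongous object starting in this region: the object
  // ends at "obj_top" and is followed by "fill_size" words of filler (see the
  // definition for the precise behavior).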
  void set_for_starts_humongous(HeapWord* obj_top, size_t fill_size);

  void print_on(outputStream* out) PRODUCT_RETURN;
};

#endif // SHARE_VM_GC_G1_G1BLOCKOFFSETTABLE_HPP
