// g1Allocator.cpp revision 13242:fcb4803050e8
/*
 * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1AllocRegion.inline.hpp"
#include "gc/g1/g1EvacStats.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"

G1DefaultAllocator::G1DefaultAllocator(G1CollectedHeap* heap) :
  G1Allocator(heap),
  _survivor_is_full(false),
  _old_is_full(false),
  _retained_old_gc_alloc_region(NULL),
  _survivor_gc_alloc_region(heap->alloc_buffer_stats(InCSetState::Young)),
  _old_gc_alloc_region(heap->alloc_buffer_stats(InCSetState::Old)) {
}

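// The mutator alloc region satisfies ordinary (non-GC) allocations. It must be
// inactive when initialized here and is expected to be inactive again after release.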
void G1DefaultAllocator::init_mutator_alloc_region() {
  assert(_mutator_alloc_region.get() == NULL, "pre-condition");
  _mutator_alloc_region.init();
}

void G1DefaultAllocator::release_mutator_alloc_region() {
  _mutator_alloc_region.release();
  assert(_mutator_alloc_region.get() == NULL, "post-condition");
}

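// Decide whether the old GC alloc region retained from the previous collection
// can be reused for this one; if so, remove it from the old region set and
// install it as the current old GC alloc region.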
void G1Allocator::reuse_retained_old_region(EvacuationInfo& evacuation_info,
                                            OldGCAllocRegion* old,
                                            HeapRegion** retained_old) {
  HeapRegion* retained_region = *retained_old;
  *retained_old = NULL;
  assert(retained_region == NULL || !retained_region->is_archive(),
         "Archive region should not be alloc region (index %u)", retained_region->hrm_index());

  // We will discard the current GC alloc region if:
  // a) it's in the collection set (it can happen!),
  // b) it's already full (no point in using it),
  // c) it's empty (this means that it was emptied during
  // a cleanup and it should be on the free list now), or
  // d) it's humongous (this means that it was emptied
  // during a cleanup and was added to the free list, but
  // has been subsequently used to allocate a humongous
  // object that may be less than the region size).
  if (retained_region != NULL &&
      !retained_region->in_collection_set() &&
      !(retained_region->top() == retained_region->end()) &&
      !retained_region->is_empty() &&
      !retained_region->is_humongous()) {
    retained_region->record_timestamp();
    // The retained region was added to the old region set when it was
    // retired. We have to remove it now, since we don't allow regions
    // we allocate to in the region sets. We'll re-add it later, when
    // it's retired again.
    _g1h->old_set_remove(retained_region);
    bool during_im = _g1h->collector_state()->during_initial_mark_pause();
    retained_region->note_start_of_copying(during_im);
    old->set(retained_region);
    _g1h->hr_printer()->reuse(retained_region);
    evacuation_info.set_alloc_regions_used_before(retained_region->used());
  }
}

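// Prepare the survivor and old GC alloc regions for an evacuation pause,
// reusing the retained old region when possible.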
void G1DefaultAllocator::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
  assert_at_safepoint(true /* should_be_vm_thread */);

  _survivor_is_full = false;
  _old_is_full = false;

  _survivor_gc_alloc_region.init();
  _old_gc_alloc_region.init();
  reuse_retained_old_region(evacuation_info,
                            &_old_gc_alloc_region,
                            &_retained_old_gc_alloc_region);
}

void G1DefaultAllocator::release_gc_alloc_regions(EvacuationInfo& evacuation_info) {
  AllocationContext_t context = AllocationContext::current();
  evacuation_info.set_allocation_regions(survivor_gc_alloc_region(context)->count() +
                                         old_gc_alloc_region(context)->count());
  survivor_gc_alloc_region(context)->release();
  // If we have an old GC alloc region to release, we'll save it in
  // _retained_old_gc_alloc_region. If we don't, _retained_old_gc_alloc_region
  // will become NULL. This is what we want either way, so there is no reason
  // to check explicitly for either condition.
  _retained_old_gc_alloc_region = old_gc_alloc_region(context)->release();
}

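// Forget the retained old GC alloc region. The survivor and old GC alloc
// regions must already be inactive at this point.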
void G1DefaultAllocator::abandon_gc_alloc_regions() {
  assert(survivor_gc_alloc_region(AllocationContext::current())->get() == NULL, "pre-condition");
  assert(old_gc_alloc_region(AllocationContext::current())->get() == NULL, "pre-condition");
  _retained_old_gc_alloc_region = NULL;
}

bool G1DefaultAllocator::survivor_is_full(AllocationContext_t context) const {
  return _survivor_is_full;
}

bool G1DefaultAllocator::old_is_full(AllocationContext_t context) const {
  return _old_is_full;
}

void G1DefaultAllocator::set_survivor_full(AllocationContext_t context) {
  _survivor_is_full = true;
}

void G1DefaultAllocator::set_old_full(AllocationContext_t context) {
  _old_is_full = true;
}

G1PLAB::G1PLAB(size_t gclab_word_size) :
  PLAB(gclab_word_size), _retired(true) { }

size_t G1Allocator::unsafe_max_tlab_alloc(AllocationContext_t context) {
  // Return the remaining space in the current alloc region, but not less
  // than the min TLAB size.

  // Also, this value can be at most the humongous object threshold,
  // since we can't allow TLABs to grow big enough to accommodate
  // humongous objects.

  HeapRegion* hr = mutator_alloc_region(context)->get();
  size_t max_tlab = _g1h->max_tlab_size() * wordSize;
  if (hr == NULL) {
    return max_tlab;
  } else {
    return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab);
  }
}

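// Allocate exactly word_size words in the GC alloc region for dest (survivor
// or old) by delegating to the min/desired-size variant below.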
HeapWord* G1Allocator::par_allocate_during_gc(InCSetState dest,
                                              size_t word_size,
                                              AllocationContext_t context) {
  size_t temp = 0;
  HeapWord* result = par_allocate_during_gc(dest, word_size, word_size, &temp, context);
  assert(result == NULL || temp == word_size,
         "Requested " SIZE_FORMAT " words, but got " SIZE_FORMAT " at " PTR_FORMAT,
         word_size, temp, p2i(result));
  return result;
}

HeapWord* G1Allocator::par_allocate_during_gc(InCSetState dest,
                                              size_t min_word_size,
                                              size_t desired_word_size,
                                              size_t* actual_word_size,
                                              AllocationContext_t context) {
  switch (dest.value()) {
    case InCSetState::Young:
      return survivor_attempt_allocation(min_word_size, desired_word_size, actual_word_size, context);
    case InCSetState::Old:
      return old_attempt_allocation(min_word_size, desired_word_size, actual_word_size, context);
    default:
      ShouldNotReachHere();
      return NULL; // Keep some compilers happy
  }
}

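// First try to allocate in the current survivor GC alloc region without taking
// a lock; if that fails and survivor space is not yet known to be full, retry
// under the FreeList_lock so that a new survivor region may be obtained.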
HeapWord* G1Allocator::survivor_attempt_allocation(size_t min_word_size,
                                                   size_t desired_word_size,
                                                   size_t* actual_word_size,
                                                   AllocationContext_t context) {
  assert(!_g1h->is_humongous(desired_word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = survivor_gc_alloc_region(context)->attempt_allocation(min_word_size,
                                                                           desired_word_size,
                                                                           actual_word_size,
                                                                           false /* bot_updates */);
  if (result == NULL && !survivor_is_full(context)) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = survivor_gc_alloc_region(context)->attempt_allocation_locked(min_word_size,
                                                                          desired_word_size,
                                                                          actual_word_size,
                                                                          false /* bot_updates */);
    if (result == NULL) {
      set_survivor_full(context);
    }
  }
  if (result != NULL) {
    _g1h->dirty_young_block(result, *actual_word_size);
  }
  return result;
}

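// The old-region counterpart of survivor_attempt_allocation(): a lock-free
// attempt first, then a retry under the FreeList_lock, with BOT (block offset
// table) updates enabled.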
HeapWord* G1Allocator::old_attempt_allocation(size_t min_word_size,
                                              size_t desired_word_size,
                                              size_t* actual_word_size,
                                              AllocationContext_t context) {
  assert(!_g1h->is_humongous(desired_word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = old_gc_alloc_region(context)->attempt_allocation(min_word_size,
                                                                      desired_word_size,
                                                                      actual_word_size,
                                                                      true /* bot_updates */);
  if (result == NULL && !old_is_full(context)) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = old_gc_alloc_region(context)->attempt_allocation_locked(min_word_size,
                                                                     desired_word_size,
                                                                     actual_word_size,
                                                                     true /* bot_updates */);
    if (result == NULL) {
      set_old_full(context);
    }
  }
  return result;
}

G1PLABAllocator::G1PLABAllocator(G1Allocator* allocator) :
  _g1h(G1CollectedHeap::heap()),
  _allocator(allocator),
  _survivor_alignment_bytes(calc_survivor_alignment_bytes()) {
  for (size_t i = 0; i < ARRAY_SIZE(_direct_allocated); i++) {
    _direct_allocated[i] = 0;
  }
}

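// Returns true if allocation_word_sz is below ParallelGCBufferWastePct percent
// of buffer_size, i.e. retiring the current buffer in favour of a new PLAB is
// considered acceptable waste.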
bool G1PLABAllocator::may_throw_away_buffer(size_t const allocation_word_sz, size_t const buffer_size) const {
  return (allocation_word_sz * 100 < buffer_size * ParallelGCBufferWastePct);
}

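// Called when allocation in the current PLAB for dest has failed: either retire
// that PLAB and refill it with a new one, or, if the request is too large for a
// refill to pay off, allocate the object directly in the GC alloc region.
// Returns NULL if both fail; *plab_refill_failed is set to true if a PLAB
// refill was attempted and did not succeed.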
HeapWord* G1PLABAllocator::allocate_direct_or_new_plab(InCSetState dest,
                                                       size_t word_sz,
                                                       AllocationContext_t context,
                                                       bool* plab_refill_failed) {
  size_t plab_word_size = G1CollectedHeap::heap()->desired_plab_sz(dest);
  size_t required_in_plab = PLAB::size_required_for_allocation(word_sz);

  // Only get a new PLAB if the allocation fits and it would not waste more than
  // ParallelGCBufferWastePct in the existing buffer.
  if ((required_in_plab <= plab_word_size) &&
      may_throw_away_buffer(required_in_plab, plab_word_size)) {

    G1PLAB* alloc_buf = alloc_buffer(dest, context);
    alloc_buf->retire();

    size_t actual_plab_size = 0;
    HeapWord* buf = _allocator->par_allocate_during_gc(dest,
                                                       required_in_plab,
                                                       plab_word_size,
                                                       &actual_plab_size,
                                                       context);

    assert(buf == NULL || ((actual_plab_size >= required_in_plab) && (actual_plab_size <= plab_word_size)),
           "Requested at minimum " SIZE_FORMAT ", desired " SIZE_FORMAT " words, but got " SIZE_FORMAT " at " PTR_FORMAT,
           required_in_plab, plab_word_size, actual_plab_size, p2i(buf));

    if (buf != NULL) {
      alloc_buf->set_buf(buf, actual_plab_size);

      HeapWord* const obj = alloc_buf->allocate(word_sz);
      assert(obj != NULL, "PLAB should have been big enough, tried to allocate "
                          SIZE_FORMAT " requiring " SIZE_FORMAT " PLAB size " SIZE_FORMAT,
                          word_sz, required_in_plab, plab_word_size);
      return obj;
    }
    // Otherwise.
    *plab_refill_failed = true;
  }
  // Try direct allocation.
  HeapWord* result = _allocator->par_allocate_during_gc(dest, word_sz, context);
  if (result != NULL) {
    _direct_allocated[dest.value()] += word_sz;
  }
  return result;
}

void G1PLABAllocator::undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz, AllocationContext_t context) {
  alloc_buffer(dest, context)->undo_allocation(obj, word_sz);
}

G1DefaultPLABAllocator::G1DefaultPLABAllocator(G1Allocator* allocator) :
  G1PLABAllocator(allocator),
  _surviving_alloc_buffer(_g1h->desired_plab_sz(InCSetState::Young)),
  _tenured_alloc_buffer(_g1h->desired_plab_sz(InCSetState::Old)) {
  for (uint state = 0; state < InCSetState::Num; state++) {
    _alloc_buffers[state] = NULL;
  }
  _alloc_buffers[InCSetState::Young] = &_surviving_alloc_buffer;
  _alloc_buffers[InCSetState::Old] = &_tenured_alloc_buffer;
}

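// Retire the per-state PLABs, folding their statistics and the directly
// allocated word counts into the heap's per-state G1EvacStats.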
void G1DefaultPLABAllocator::flush_and_retire_stats() {
  for (uint state = 0; state < InCSetState::Num; state++) {
    G1PLAB* const buf = _alloc_buffers[state];
    if (buf != NULL) {
      G1EvacStats* stats = _g1h->alloc_buffer_stats(state);
      buf->flush_and_retire_stats(stats);
      stats->add_direct_allocated(_direct_allocated[state]);
      _direct_allocated[state] = 0;
    }
  }
}

void G1DefaultPLABAllocator::waste(size_t& wasted, size_t& undo_wasted) {
  wasted = 0;
  undo_wasted = 0;
  for (uint state = 0; state < InCSetState::Num; state++) {
    G1PLAB* const buf = _alloc_buffers[state];
    if (buf != NULL) {
      wasted += buf->waste();
      undo_wasted += buf->undo_waste();
    }
  }
}

bool G1ArchiveAllocator::_archive_check_enabled = false;
G1ArchiveRegionMap G1ArchiveAllocator::_archive_region_map;

G1ArchiveAllocator* G1ArchiveAllocator::create_allocator(G1CollectedHeap* g1h) {
  // Create the archive allocator, and also enable archive object checking
  // in mark-sweep, since we will be creating archive regions.
  G1ArchiveAllocator* result = new G1ArchiveAllocator(g1h);
  enable_archive_object_check();
  return result;
}

bool G1ArchiveAllocator::alloc_new_region() {
  // Allocate the highest free region in the reserved heap,
  // and add it to our list of allocated regions. It is marked
  // archive and added to the old set.
  HeapRegion* hr = _g1h->alloc_highest_free_region();
  if (hr == NULL) {
    return false;
  }
  assert(hr->is_empty(), "expected empty region (index %u)", hr->hrm_index());
  hr->set_archive();
  _g1h->old_set_add(hr);
  _g1h->hr_printer()->alloc(hr);
  _allocated_regions.append(hr);
  _allocation_region = hr;

  // Set up _bottom and _max to begin allocating in the lowest
  // min_region_size'd chunk of the allocated G1 region.
  _bottom = hr->bottom();
  _max = _bottom + HeapRegion::min_region_size_in_words();

  // Tell mark-sweep that objects in this region are not to be marked.
  set_range_archive(MemRegion(_bottom, HeapRegion::GrainWords), true);

  // Since we've modified the old set, call update_sizes.
  _g1h->g1mm()->update_sizes();
  return true;
}

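// Allocate word_size words in the current archive region, inserting a filler
// and moving on to the next min_region_size'd chunk (or a new region) when the
// current chunk cannot accommodate the request. Returns NULL if no new region
// can be obtained.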
HeapWord* G1ArchiveAllocator::archive_mem_allocate(size_t word_size) {
  assert(word_size != 0, "size must not be zero");
  if (_allocation_region == NULL) {
    if (!alloc_new_region()) {
      return NULL;
    }
  }
  HeapWord* old_top = _allocation_region->top();
  assert(_bottom >= _allocation_region->bottom(),
         "inconsistent allocation state: " PTR_FORMAT " < " PTR_FORMAT,
         p2i(_bottom), p2i(_allocation_region->bottom()));
  assert(_max <= _allocation_region->end(),
         "inconsistent allocation state: " PTR_FORMAT " > " PTR_FORMAT,
         p2i(_max), p2i(_allocation_region->end()));
  assert(_bottom <= old_top && old_top <= _max,
         "inconsistent allocation state: expected "
         PTR_FORMAT " <= " PTR_FORMAT " <= " PTR_FORMAT,
         p2i(_bottom), p2i(old_top), p2i(_max));

  // Allocate the next word_size words in the current allocation chunk.
  // If allocation would cross the _max boundary, insert a filler and begin
  // at the base of the next min_region_size'd chunk. Also advance to the next
  // chunk if we don't yet cross the boundary, but the remainder would be too
  // small to fill.
  HeapWord* new_top = old_top + word_size;
  size_t remainder = pointer_delta(_max, new_top);
  if ((new_top > _max) ||
      ((new_top < _max) && (remainder < CollectedHeap::min_fill_size()))) {
    if (old_top != _max) {
      size_t fill_size = pointer_delta(_max, old_top);
      CollectedHeap::fill_with_object(old_top, fill_size);
      _summary_bytes_used += fill_size * HeapWordSize;
    }
    _allocation_region->set_top(_max);
    old_top = _bottom = _max;

    // Check if we've just used up the last min_region_size'd chunk
    // in the current region, and if so, allocate a new one.
    if (_bottom != _allocation_region->end()) {
      _max = _bottom + HeapRegion::min_region_size_in_words();
    } else {
      if (!alloc_new_region()) {
        return NULL;
      }
      old_top = _allocation_region->bottom();
    }
  }
  _allocation_region->set_top(old_top + word_size);
  _summary_bytes_used += word_size * HeapWordSize;

  return old_top;
}

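// Finish archive allocation: pad the current region up to the requested end
// alignment, then summarize everything allocated so far as MemRegions
// (coalescing contiguous regions) appended to the caller's GrowableArray.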
void G1ArchiveAllocator::complete_archive(GrowableArray<MemRegion>* ranges,
                                          size_t end_alignment_in_bytes) {
  assert((end_alignment_in_bytes >> LogHeapWordSize) < HeapRegion::min_region_size_in_words(),
         "alignment " SIZE_FORMAT " too large", end_alignment_in_bytes);
  assert(is_size_aligned(end_alignment_in_bytes, HeapWordSize),
         "alignment " SIZE_FORMAT " is not HeapWord (%u) aligned", end_alignment_in_bytes, HeapWordSize);

  // If we've allocated nothing, simply return.
  if (_allocation_region == NULL) {
    return;
  }

  // If an end alignment was requested, insert filler objects.
  if (end_alignment_in_bytes != 0) {
    HeapWord* currtop = _allocation_region->top();
    HeapWord* newtop = align_ptr_up(currtop, end_alignment_in_bytes);
    size_t fill_size = pointer_delta(newtop, currtop);
    if (fill_size != 0) {
      if (fill_size < CollectedHeap::min_fill_size()) {
        // If the required fill is smaller than we can represent,
        // bump up to the next aligned address. We know we won't exceed the current
        // region boundary because the max supported alignment is smaller than the min
        // region size, and because the allocation code never leaves space smaller than
        // the min_fill_size at the top of the current allocation region.
        newtop = align_ptr_up(currtop + CollectedHeap::min_fill_size(),
                              end_alignment_in_bytes);
        fill_size = pointer_delta(newtop, currtop);
      }
      HeapWord* fill = archive_mem_allocate(fill_size);
      CollectedHeap::fill_with_objects(fill, fill_size);
    }
  }

  // Loop through the allocated regions, and create MemRegions summarizing
  // the allocated address range, combining contiguous ranges. Add the
  // MemRegions to the GrowableArray provided by the caller.
  int index = _allocated_regions.length() - 1;
  assert(_allocated_regions.at(index) == _allocation_region,
         "expected region %u at end of array, found %u",
         _allocation_region->hrm_index(), _allocated_regions.at(index)->hrm_index());
  HeapWord* base_address = _allocation_region->bottom();
  HeapWord* top = base_address;

  while (index >= 0) {
    HeapRegion* next = _allocated_regions.at(index);
    HeapWord* new_base = next->bottom();
    HeapWord* new_top = next->top();
    if (new_base != top) {
      ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
      base_address = new_base;
    }
    top = new_top;
    index = index - 1;
  }

  assert(top != base_address, "zero-sized range, address " PTR_FORMAT, p2i(base_address));
  ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
  _allocated_regions.clear();
  _allocation_region = NULL;
}