g1Allocator.cpp revision 13249:a2753984d2c1
/*
 * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1AllocRegion.inline.hpp"
#include "gc/g1/g1EvacStats.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "utilities/align.hpp"

G1DefaultAllocator::G1DefaultAllocator(G1CollectedHeap* heap) :
  G1Allocator(heap),
  _survivor_is_full(false),
  _old_is_full(false),
  _retained_old_gc_alloc_region(NULL),
  _survivor_gc_alloc_region(heap->alloc_buffer_stats(InCSetState::Young)),
  _old_gc_alloc_region(heap->alloc_buffer_stats(InCSetState::Old)) {
}

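// The mutator alloc region is the heap region used to satisfy TLAB and other
// mutator allocation requests between collection pauses. It must be unset when
// init_mutator_alloc_region() is called and is unset again after
// release_mutator_alloc_region().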
void G1DefaultAllocator::init_mutator_alloc_region() {
  assert(_mutator_alloc_region.get() == NULL, "pre-condition");
  _mutator_alloc_region.init();
}

void G1DefaultAllocator::release_mutator_alloc_region() {
  _mutator_alloc_region.release();
  assert(_mutator_alloc_region.get() == NULL, "post-condition");
}

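// Decide whether the old region retained from the previous evacuation can be
// reused as the initial old GC alloc region, and install it in *old if so.
// The retained region is consumed (cleared from *retained_old) either way.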
void G1Allocator::reuse_retained_old_region(EvacuationInfo& evacuation_info,
                                            OldGCAllocRegion* old,
                                            HeapRegion** retained_old) {
  HeapRegion* retained_region = *retained_old;
  *retained_old = NULL;
  assert(retained_region == NULL || !retained_region->is_archive(),
         "Archive region should not be alloc region (index %u)", retained_region->hrm_index());

  // We will discard the current GC alloc region if:
  // a) it's in the collection set (it can happen!),
  // b) it's already full (no point in using it),
  // c) it's empty (this means that it was emptied during
  // a cleanup and it should be on the free list now), or
  // d) it's humongous (this means that it was emptied
  // during a cleanup and was added to the free list, but
  // has been subsequently used to allocate a humongous
  // object that may be less than the region size).
  if (retained_region != NULL &&
      !retained_region->in_collection_set() &&
      !(retained_region->top() == retained_region->end()) &&
      !retained_region->is_empty() &&
      !retained_region->is_humongous()) {
    retained_region->record_timestamp();
    // The retained region was added to the old region set when it was
    // retired. We have to remove it now, since we don't allow regions
    // we allocate to in the region sets. We'll re-add it later, when
    // it's retired again.
    _g1h->old_set_remove(retained_region);
    bool during_im = _g1h->collector_state()->during_initial_mark_pause();
    retained_region->note_start_of_copying(during_im);
    old->set(retained_region);
    _g1h->hr_printer()->reuse(retained_region);
    evacuation_info.set_alloc_regions_used_before(retained_region->used());
  }
}

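// Prepare for a new evacuation at a safepoint: clear the "full" flags,
// initialize the survivor and old GC alloc regions, and try to reuse the old
// region retained from the previous evacuation.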
void G1DefaultAllocator::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
  assert_at_safepoint(true /* should_be_vm_thread */);

  _survivor_is_full = false;
  _old_is_full = false;

  _survivor_gc_alloc_region.init();
  _old_gc_alloc_region.init();
  reuse_retained_old_region(evacuation_info,
                            &_old_gc_alloc_region,
                            &_retained_old_gc_alloc_region);
}

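// Record the number of regions used for this evacuation and release the GC
// alloc regions. The old region, if there is one, is saved in
// _retained_old_gc_alloc_region so a later evacuation may continue filling it.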
void G1DefaultAllocator::release_gc_alloc_regions(EvacuationInfo& evacuation_info) {
  AllocationContext_t context = AllocationContext::current();
  evacuation_info.set_allocation_regions(survivor_gc_alloc_region(context)->count() +
                                         old_gc_alloc_region(context)->count());
  survivor_gc_alloc_region(context)->release();
  // If we have an old GC alloc region to release, we'll save it in
  // _retained_old_gc_alloc_region. If we don't,
  // _retained_old_gc_alloc_region will become NULL. This is what we
  // want either way so no reason to check explicitly for either
  // condition.
  _retained_old_gc_alloc_region = old_gc_alloc_region(context)->release();
}

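// Forget the retained old region without releasing anything: both GC alloc
// regions are expected to be unset already, as asserted below.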
void G1DefaultAllocator::abandon_gc_alloc_regions() {
  assert(survivor_gc_alloc_region(AllocationContext::current())->get() == NULL, "pre-condition");
  assert(old_gc_alloc_region(AllocationContext::current())->get() == NULL, "pre-condition");
  _retained_old_gc_alloc_region = NULL;
}

bool G1DefaultAllocator::survivor_is_full(AllocationContext_t context) const {
  return _survivor_is_full;
}

bool G1DefaultAllocator::old_is_full(AllocationContext_t context) const {
  return _old_is_full;
}

void G1DefaultAllocator::set_survivor_full(AllocationContext_t context) {
  _survivor_is_full = true;
}

void G1DefaultAllocator::set_old_full(AllocationContext_t context) {
  _old_is_full = true;
}

G1PLAB::G1PLAB(size_t gclab_word_size) :
  PLAB(gclab_word_size), _retired(true) { }

size_t G1Allocator::unsafe_max_tlab_alloc(AllocationContext_t context) {
  // Return the remaining space in the current alloc region, but not less than
  // the min TLAB size.

  // Also, this value can be at most the humongous object threshold,
  // since we can't allow TLABs to grow big enough to accommodate
  // humongous objects.

  HeapRegion* hr = mutator_alloc_region(context)->get();
  size_t max_tlab = _g1h->max_tlab_size() * wordSize;
  if (hr == NULL) {
    return max_tlab;
  } else {
    return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab);
  }
}

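// Exact-size convenience overload: requests word_size as both the minimum and
// desired size, and asserts that a successful allocation handed out exactly
// word_size words.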
HeapWord* G1Allocator::par_allocate_during_gc(InCSetState dest,
                                              size_t word_size,
                                              AllocationContext_t context) {
  size_t temp = 0;
  HeapWord* result = par_allocate_during_gc(dest, word_size, word_size, &temp, context);
  assert(result == NULL || temp == word_size,
         "Requested " SIZE_FORMAT " words, but got " SIZE_FORMAT " at " PTR_FORMAT,
         word_size, temp, p2i(result));
  return result;
}

HeapWord* G1Allocator::par_allocate_during_gc(InCSetState dest,
                                              size_t min_word_size,
                                              size_t desired_word_size,
                                              size_t* actual_word_size,
                                              AllocationContext_t context) {
  switch (dest.value()) {
    case InCSetState::Young:
      return survivor_attempt_allocation(min_word_size, desired_word_size, actual_word_size, context);
    case InCSetState::Old:
      return old_attempt_allocation(min_word_size, desired_word_size, actual_word_size, context);
    default:
      ShouldNotReachHere();
      return NULL; // Keep some compilers happy
  }
}

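// Allocate out of the survivor GC alloc region: first try the current region
// without taking a lock, then retry under the FreeList_lock, which may also
// replace the region with a fresh one. If both attempts fail, mark the
// survivor space as full so later requests fail fast. Successful allocations
// are dirtied via dirty_young_block().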
HeapWord* G1Allocator::survivor_attempt_allocation(size_t min_word_size,
                                                   size_t desired_word_size,
                                                   size_t* actual_word_size,
                                                   AllocationContext_t context) {
  assert(!_g1h->is_humongous(desired_word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = survivor_gc_alloc_region(context)->attempt_allocation(min_word_size,
                                                                           desired_word_size,
                                                                           actual_word_size,
                                                                           false /* bot_updates */);
  if (result == NULL && !survivor_is_full(context)) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = survivor_gc_alloc_region(context)->attempt_allocation_locked(min_word_size,
                                                                          desired_word_size,
                                                                          actual_word_size,
                                                                          false /* bot_updates */);
    if (result == NULL) {
      set_survivor_full(context);
    }
  }
  if (result != NULL) {
    _g1h->dirty_young_block(result, *actual_word_size);
  }
  return result;
}

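// Same two-step scheme as survivor_attempt_allocation(), but for the old GC
// alloc region: allocations pass bot_updates == true so the block offset table
// is maintained, and no card dirtying is needed on success.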
HeapWord* G1Allocator::old_attempt_allocation(size_t min_word_size,
                                              size_t desired_word_size,
                                              size_t* actual_word_size,
                                              AllocationContext_t context) {
  assert(!_g1h->is_humongous(desired_word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = old_gc_alloc_region(context)->attempt_allocation(min_word_size,
                                                                      desired_word_size,
                                                                      actual_word_size,
                                                                      true /* bot_updates */);
  if (result == NULL && !old_is_full(context)) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = old_gc_alloc_region(context)->attempt_allocation_locked(min_word_size,
                                                                     desired_word_size,
                                                                     actual_word_size,
                                                                     true /* bot_updates */);
    if (result == NULL) {
      set_old_full(context);
    }
  }
  return result;
}

G1PLABAllocator::G1PLABAllocator(G1Allocator* allocator) :
  _g1h(G1CollectedHeap::heap()),
  _allocator(allocator),
  _survivor_alignment_bytes(calc_survivor_alignment_bytes()) {
  for (size_t i = 0; i < ARRAY_SIZE(_direct_allocated); i++) {
    _direct_allocated[i] = 0;
  }
}

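// Returns true if it is acceptable to retire (throw away) a partially used
// buffer in order to satisfy a request of allocation_word_sz words, i.e. if
// the request is smaller than ParallelGCBufferWastePct percent of buffer_size.
// For example, with a 1024-word buffer and ParallelGCBufferWastePct == 10,
// requests of up to 102 words allow the buffer to be retired and refilled;
// anything larger is allocated directly instead (see
// allocate_direct_or_new_plab() below).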
bool G1PLABAllocator::may_throw_away_buffer(size_t const allocation_word_sz, size_t const buffer_size) const {
  return (allocation_word_sz * 100 < buffer_size * ParallelGCBufferWastePct);
}

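// Refill path for PLAB allocation of word_sz words into dest. If the request
// is small enough to make a refill worthwhile, retire the current PLAB and
// install a newly allocated one (setting *plab_refill_failed if no space for
// a new PLAB could be found); otherwise, or if the refill failed, allocate the
// object directly outside a PLAB and account for it in _direct_allocated.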
HeapWord* G1PLABAllocator::allocate_direct_or_new_plab(InCSetState dest,
                                                       size_t word_sz,
                                                       AllocationContext_t context,
                                                       bool* plab_refill_failed) {
  size_t plab_word_size = G1CollectedHeap::heap()->desired_plab_sz(dest);
  size_t required_in_plab = PLAB::size_required_for_allocation(word_sz);

  // Only get a new PLAB if the allocation fits and it would not waste more than
  // ParallelGCBufferWastePct in the existing buffer.
  if ((required_in_plab <= plab_word_size) &&
    may_throw_away_buffer(required_in_plab, plab_word_size)) {

    G1PLAB* alloc_buf = alloc_buffer(dest, context);
    alloc_buf->retire();

    size_t actual_plab_size = 0;
    HeapWord* buf = _allocator->par_allocate_during_gc(dest,
                                                       required_in_plab,
                                                       plab_word_size,
                                                       &actual_plab_size,
                                                       context);

    assert(buf == NULL || ((actual_plab_size >= required_in_plab) && (actual_plab_size <= plab_word_size)),
           "Requested at minimum " SIZE_FORMAT ", desired " SIZE_FORMAT " words, but got " SIZE_FORMAT " at " PTR_FORMAT,
           required_in_plab, plab_word_size, actual_plab_size, p2i(buf));

    if (buf != NULL) {
      alloc_buf->set_buf(buf, actual_plab_size);

      HeapWord* const obj = alloc_buf->allocate(word_sz);
      assert(obj != NULL, "PLAB should have been big enough, tried to allocate "
                          SIZE_FORMAT " requiring " SIZE_FORMAT " PLAB size " SIZE_FORMAT,
                          word_sz, required_in_plab, plab_word_size);
      return obj;
    }
    // Otherwise.
    *plab_refill_failed = true;
  }
  // Try direct allocation.
  HeapWord* result = _allocator->par_allocate_during_gc(dest, word_sz, context);
  if (result != NULL) {
    _direct_allocated[dest.value()] += word_sz;
  }
  return result;
}

void G1PLABAllocator::undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz, AllocationContext_t context) {
  alloc_buffer(dest, context)->undo_allocation(obj, word_sz);
}

G1DefaultPLABAllocator::G1DefaultPLABAllocator(G1Allocator* allocator) :
  G1PLABAllocator(allocator),
  _surviving_alloc_buffer(_g1h->desired_plab_sz(InCSetState::Young)),
  _tenured_alloc_buffer(_g1h->desired_plab_sz(InCSetState::Old)) {
  for (uint state = 0; state < InCSetState::Num; state++) {
    _alloc_buffers[state] = NULL;
  }
  _alloc_buffers[InCSetState::Young] = &_surviving_alloc_buffer;
  _alloc_buffers[InCSetState::Old]  = &_tenured_alloc_buffer;
}

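// Retire all per-destination PLABs and flush their statistics, together with
// the words allocated directly outside PLABs, into the heap's G1EvacStats,
// which are used to size future PLABs.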
void G1DefaultPLABAllocator::flush_and_retire_stats() {
  for (uint state = 0; state < InCSetState::Num; state++) {
    G1PLAB* const buf = _alloc_buffers[state];
    if (buf != NULL) {
      G1EvacStats* stats = _g1h->alloc_buffer_stats(state);
      buf->flush_and_retire_stats(stats);
      stats->add_direct_allocated(_direct_allocated[state]);
      _direct_allocated[state] = 0;
    }
  }
}

void G1DefaultPLABAllocator::waste(size_t& wasted, size_t& undo_wasted) {
  wasted = 0;
  undo_wasted = 0;
  for (uint state = 0; state < InCSetState::Num; state++) {
    G1PLAB* const buf = _alloc_buffers[state];
    if (buf != NULL) {
      wasted += buf->waste();
      undo_wasted += buf->undo_waste();
    }
  }
}

bool G1ArchiveAllocator::_archive_check_enabled = false;
G1ArchiveRegionMap G1ArchiveAllocator::_archive_region_map;

G1ArchiveAllocator* G1ArchiveAllocator::create_allocator(G1CollectedHeap* g1h) {
  // Create the archive allocator, and also enable archive object checking
  // in mark-sweep, since we will be creating archive regions.
  G1ArchiveAllocator* result = new G1ArchiveAllocator(g1h);
  enable_archive_object_check();
  return result;
}

bool G1ArchiveAllocator::alloc_new_region() {
  // Allocate the highest free region in the reserved heap,
  // and add it to our list of allocated regions. It is marked
  // archive and added to the old set.
  HeapRegion* hr = _g1h->alloc_highest_free_region();
  if (hr == NULL) {
    return false;
  }
  assert(hr->is_empty(), "expected empty region (index %u)", hr->hrm_index());
  hr->set_archive();
  _g1h->old_set_add(hr);
  _g1h->hr_printer()->alloc(hr);
  _allocated_regions.append(hr);
  _allocation_region = hr;

  // Set up _bottom and _max to begin allocating in the lowest
  // min_region_size'd chunk of the allocated G1 region.
  _bottom = hr->bottom();
  _max = _bottom + HeapRegion::min_region_size_in_words();

  // Tell mark-sweep that objects in this region are not to be marked.
  set_range_archive(MemRegion(_bottom, HeapRegion::GrainWords), true);

  // Since we've modified the old set, call update_sizes.
  _g1h->g1mm()->update_sizes();
  return true;
}

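// Bump-pointer allocation of word_size words for archive data. Space is handed
// out in min_region_size'd chunks of the current archive region; when the
// request does not fit in the current chunk, or would leave a tail too small
// to fill, the chunk is closed with a filler object and allocation continues
// in the next chunk, taking a new region when the current one is used up.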
HeapWord* G1ArchiveAllocator::archive_mem_allocate(size_t word_size) {
  assert(word_size != 0, "size must not be zero");
  if (_allocation_region == NULL) {
    if (!alloc_new_region()) {
      return NULL;
    }
  }
  HeapWord* old_top = _allocation_region->top();
  assert(_bottom >= _allocation_region->bottom(),
         "inconsistent allocation state: " PTR_FORMAT " < " PTR_FORMAT,
         p2i(_bottom), p2i(_allocation_region->bottom()));
  assert(_max <= _allocation_region->end(),
         "inconsistent allocation state: " PTR_FORMAT " > " PTR_FORMAT,
         p2i(_max), p2i(_allocation_region->end()));
  assert(_bottom <= old_top && old_top <= _max,
         "inconsistent allocation state: expected "
         PTR_FORMAT " <= " PTR_FORMAT " <= " PTR_FORMAT,
         p2i(_bottom), p2i(old_top), p2i(_max));

  // Allocate the next word_size words in the current allocation chunk.
  // If allocation would cross the _max boundary, insert a filler and begin
  // at the base of the next min_region_size'd chunk. Also advance to the next
  // chunk if we don't yet cross the boundary, but the remainder would be too
  // small to fill.
  HeapWord* new_top = old_top + word_size;
  size_t remainder = pointer_delta(_max, new_top);
  if ((new_top > _max) ||
      ((new_top < _max) && (remainder < CollectedHeap::min_fill_size()))) {
    if (old_top != _max) {
      size_t fill_size = pointer_delta(_max, old_top);
      CollectedHeap::fill_with_object(old_top, fill_size);
      _summary_bytes_used += fill_size * HeapWordSize;
    }
    _allocation_region->set_top(_max);
    old_top = _bottom = _max;

    // Check if we've just used up the last min_region_size'd chunk
    // in the current region, and if so, allocate a new one.
    if (_bottom != _allocation_region->end()) {
      _max = _bottom + HeapRegion::min_region_size_in_words();
    } else {
      if (!alloc_new_region()) {
        return NULL;
      }
      old_top = _allocation_region->bottom();
    }
  }
  _allocation_region->set_top(old_top + word_size);
  _summary_bytes_used += word_size * HeapWordSize;

  return old_top;
}

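// Finish archive allocation: pad the current region up to the requested end
// alignment, if any, then append one MemRegion per contiguous range of
// allocated space to the caller-supplied array and reset the allocator's
// region bookkeeping.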
void G1ArchiveAllocator::complete_archive(GrowableArray<MemRegion>* ranges,
                                          size_t end_alignment_in_bytes) {
  assert((end_alignment_in_bytes >> LogHeapWordSize) < HeapRegion::min_region_size_in_words(),
         "alignment " SIZE_FORMAT " too large", end_alignment_in_bytes);
  assert(is_aligned(end_alignment_in_bytes, HeapWordSize),
         "alignment " SIZE_FORMAT " is not HeapWord (%u) aligned", end_alignment_in_bytes, HeapWordSize);

  // If we've allocated nothing, simply return.
  if (_allocation_region == NULL) {
    return;
  }

  // If an end alignment was requested, insert filler objects.
  if (end_alignment_in_bytes != 0) {
    HeapWord* currtop = _allocation_region->top();
    HeapWord* newtop = align_up(currtop, end_alignment_in_bytes);
    size_t fill_size = pointer_delta(newtop, currtop);
    if (fill_size != 0) {
      if (fill_size < CollectedHeap::min_fill_size()) {
        // If the required fill is smaller than we can represent,
        // bump up to the next aligned address. We know we won't exceed the current
        // region boundary because the max supported alignment is smaller than the min
        // region size, and because the allocation code never leaves space smaller than
        // the min_fill_size at the top of the current allocation region.
        newtop = align_up(currtop + CollectedHeap::min_fill_size(),
                          end_alignment_in_bytes);
        fill_size = pointer_delta(newtop, currtop);
      }
      HeapWord* fill = archive_mem_allocate(fill_size);
      CollectedHeap::fill_with_objects(fill, fill_size);
    }
  }

  // Loop through the allocated regions, and create MemRegions summarizing
  // the allocated address range, combining contiguous ranges. Add the
  // MemRegions to the GrowableArray provided by the caller.
  int index = _allocated_regions.length() - 1;
  assert(_allocated_regions.at(index) == _allocation_region,
         "expected region %u at end of array, found %u",
         _allocation_region->hrm_index(), _allocated_regions.at(index)->hrm_index());
  HeapWord* base_address = _allocation_region->bottom();
  HeapWord* top = base_address;

  while (index >= 0) {
    HeapRegion* next = _allocated_regions.at(index);
    HeapWord* new_base = next->bottom();
    HeapWord* new_top = next->top();
    if (new_base != top) {
      ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
      base_address = new_base;
    }
    top = new_top;
    index = index - 1;
  }

  assert(top != base_address, "zero-sized range, address " PTR_FORMAT, p2i(base_address));
  ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
  _allocated_regions.clear();
  _allocation_region = NULL;
}