g1Allocator.cpp revision 9056:dc9930a04ab0
/*
 * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1AllocRegion.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1MarkSweep.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"

G1DefaultAllocator::G1DefaultAllocator(G1CollectedHeap* heap) :
  G1Allocator(heap),
  _retained_old_gc_alloc_region(NULL),
  _survivor_gc_alloc_region(heap->alloc_buffer_stats(InCSetState::Young)),
  _old_gc_alloc_region(heap->alloc_buffer_stats(InCSetState::Old)) {
}

void G1DefaultAllocator::init_mutator_alloc_region() {
  assert(_mutator_alloc_region.get() == NULL, "pre-condition");
  _mutator_alloc_region.init();
}

void G1DefaultAllocator::release_mutator_alloc_region() {
  _mutator_alloc_region.release();
  assert(_mutator_alloc_region.get() == NULL, "post-condition");
}

void G1Allocator::reuse_retained_old_region(EvacuationInfo& evacuation_info,
                                            OldGCAllocRegion* old,
                                            HeapRegion** retained_old) {
  HeapRegion* retained_region = *retained_old;
  *retained_old = NULL;
  assert(retained_region == NULL || !retained_region->is_archive(),
         "Archive region should not be alloc region (index %u)", retained_region->hrm_index());

  // We will discard the current GC alloc region if:
  // a) it's in the collection set (it can happen!),
  // b) it's already full (no point in using it),
  // c) it's empty (this means that it was emptied during
  // a cleanup and it should be on the free list now), or
  // d) it's humongous (this means that it was emptied
  // during a cleanup and was added to the free list, but
  // has been subsequently used to allocate a humongous
  // object that may be less than the region size).
  if (retained_region != NULL &&
      !retained_region->in_collection_set() &&
      !(retained_region->top() == retained_region->end()) &&
      !retained_region->is_empty() &&
      !retained_region->is_humongous()) {
    retained_region->record_timestamp();
    // The retained region was added to the old region set when it was
    // retired. We have to remove it now, since we don't allow regions
    // we allocate to in the region sets. We'll re-add it later, when
    // it's retired again.
    _g1h->old_set_remove(retained_region);
    bool during_im = _g1h->collector_state()->during_initial_mark_pause();
    retained_region->note_start_of_copying(during_im);
    old->set(retained_region);
    _g1h->hr_printer()->reuse(retained_region);
    evacuation_info.set_alloc_regions_used_before(retained_region->used());
  }
}

void G1DefaultAllocator::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
  assert_at_safepoint(true /* should_be_vm_thread */);

  G1Allocator::init_gc_alloc_regions(evacuation_info);

  _survivor_gc_alloc_region.init();
  _old_gc_alloc_region.init();
  reuse_retained_old_region(evacuation_info,
                            &_old_gc_alloc_region,
                            &_retained_old_gc_alloc_region);
}

void G1DefaultAllocator::release_gc_alloc_regions(EvacuationInfo& evacuation_info) {
  AllocationContext_t context = AllocationContext::current();
  evacuation_info.set_allocation_regions(survivor_gc_alloc_region(context)->count() +
                                         old_gc_alloc_region(context)->count());
  survivor_gc_alloc_region(context)->release();
  // If we have an old GC alloc region to release, we'll save it in
  // _retained_old_gc_alloc_region. If we don't, _retained_old_gc_alloc_region
  // will become NULL. This is what we want either way, so there is no reason
  // to check explicitly for either condition.
  _retained_old_gc_alloc_region = old_gc_alloc_region(context)->release();
  if (_retained_old_gc_alloc_region != NULL) {
    _retained_old_gc_alloc_region->record_retained_region();
  }

  _g1h->alloc_buffer_stats(InCSetState::Young)->adjust_desired_plab_sz();
  _g1h->alloc_buffer_stats(InCSetState::Old)->adjust_desired_plab_sz();
}

void G1DefaultAllocator::abandon_gc_alloc_regions() {
  assert(survivor_gc_alloc_region(AllocationContext::current())->get() == NULL, "pre-condition");
  assert(old_gc_alloc_region(AllocationContext::current())->get() == NULL, "pre-condition");
  _retained_old_gc_alloc_region = NULL;
}

G1PLAB::G1PLAB(size_t gclab_word_size) :
  PLAB(gclab_word_size), _retired(true) { }

size_t G1Allocator::unsafe_max_tlab_alloc(AllocationContext_t context) {
  // Return the remaining space in the current alloc region, but not less
  // than the minimum TLAB size.

  // Also, this value can be at most the humongous object threshold,
  // since we can't allow tlabs to grow big enough to accommodate
  // humongous objects.
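  //
  // Worked example (assuming 1M regions, so a humongous threshold of half a
  // region): max_tlab is just under 512K. If the current alloc region has 2K
  // free, the result is MIN2(MAX2(2K, MinTLABSize), ~512K), i.e. 2K whenever
  // MinTLABSize <= 2K; with no current alloc region, the full ~512K cap is
  // reported.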

  HeapRegion* hr = mutator_alloc_region(context)->get();
  size_t max_tlab = _g1h->max_tlab_size() * wordSize;
  if (hr == NULL) {
    return max_tlab;
  } else {
    return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab);
  }
}

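// Fixed-size variant of the allocation routine below: requests exactly
// word_size words and asserts that a successful allocation handed back
// exactly that amount.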
HeapWord* G1Allocator::par_allocate_during_gc(InCSetState dest,
                                              size_t word_size,
                                              AllocationContext_t context) {
  size_t temp = 0;
  HeapWord* result = par_allocate_during_gc(dest, word_size, word_size, &temp, context);
  assert(result == NULL || temp == word_size,
         "Requested " SIZE_FORMAT " words, but got " SIZE_FORMAT " at " PTR_FORMAT,
         word_size, temp, p2i(result));
  return result;
}

HeapWord* G1Allocator::par_allocate_during_gc(InCSetState dest,
                                              size_t min_word_size,
                                              size_t desired_word_size,
                                              size_t* actual_word_size,
                                              AllocationContext_t context) {
  switch (dest.value()) {
    case InCSetState::Young:
      return survivor_attempt_allocation(min_word_size, desired_word_size, actual_word_size, context);
    case InCSetState::Old:
      return old_attempt_allocation(min_word_size, desired_word_size, actual_word_size, context);
    default:
      ShouldNotReachHere();
      return NULL; // Keep some compilers happy
  }
}

bool G1Allocator::survivor_is_full(AllocationContext_t context) const {
  return _survivor_is_full;
}

bool G1Allocator::old_is_full(AllocationContext_t context) const {
  return _old_is_full;
}

void G1Allocator::set_survivor_full(AllocationContext_t context) {
  _survivor_is_full = true;
}

void G1Allocator::set_old_full(AllocationContext_t context) {
  _old_is_full = true;
}

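// Attempt a survivor-space allocation for an evacuated object: first an
// unsynchronized attempt on the current survivor alloc region; if that fails
// and survivors are not yet known to be full, retry under the FreeList_lock,
// which may retire the current region and install a new one. A failed locked
// attempt marks the survivor space as full so other workers can skip the
// lock. old_attempt_allocation() below follows the same pattern for old
// regions, with BOT updates enabled.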
HeapWord* G1Allocator::survivor_attempt_allocation(size_t min_word_size,
                                                   size_t desired_word_size,
                                                   size_t* actual_word_size,
                                                   AllocationContext_t context) {
  assert(!_g1h->is_humongous(desired_word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = survivor_gc_alloc_region(context)->attempt_allocation(min_word_size,
                                                                           desired_word_size,
                                                                           actual_word_size,
                                                                           false /* bot_updates */);
  if (result == NULL && !survivor_is_full(context)) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = survivor_gc_alloc_region(context)->attempt_allocation_locked(min_word_size,
                                                                          desired_word_size,
                                                                          actual_word_size,
                                                                          false /* bot_updates */);
    if (result == NULL) {
      set_survivor_full(context);
    }
  }
  if (result != NULL) {
    _g1h->dirty_young_block(result, *actual_word_size);
  }
  return result;
}

HeapWord* G1Allocator::old_attempt_allocation(size_t min_word_size,
                                              size_t desired_word_size,
                                              size_t* actual_word_size,
                                              AllocationContext_t context) {
  assert(!_g1h->is_humongous(desired_word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = old_gc_alloc_region(context)->attempt_allocation(min_word_size,
                                                                      desired_word_size,
                                                                      actual_word_size,
                                                                      true /* bot_updates */);
  if (result == NULL && !old_is_full(context)) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = old_gc_alloc_region(context)->attempt_allocation_locked(min_word_size,
                                                                     desired_word_size,
                                                                     actual_word_size,
                                                                     true /* bot_updates */);
    if (result == NULL) {
      set_old_full(context);
    }
  }
  return result;
}

void G1Allocator::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
  _survivor_is_full = false;
  _old_is_full = false;
}

G1PLABAllocator::G1PLABAllocator(G1Allocator* allocator) :
  _g1h(G1CollectedHeap::heap()),
  _allocator(allocator),
  _survivor_alignment_bytes(calc_survivor_alignment_bytes()) {
  for (size_t i = 0; i < ARRAY_SIZE(_direct_allocated); i++) {
    _direct_allocated[i] = 0;
  }
}

bool G1PLABAllocator::may_throw_away_buffer(size_t const allocation_word_sz, size_t const buffer_size) const {
  return (allocation_word_sz * 100 < buffer_size * ParallelGCBufferWastePct);
}

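// Called when an in-PLAB allocation of word_sz words has failed. Either
// retire the current PLAB and retry from a freshly allocated one, or, if the
// object is large enough that a refill is not worthwhile (or the refill
// itself fails), fall back to a direct allocation outside of any PLAB.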
HeapWord* G1PLABAllocator::allocate_direct_or_new_plab(InCSetState dest,
                                                       size_t word_sz,
                                                       AllocationContext_t context,
                                                       bool* plab_refill_failed) {
  size_t plab_word_size = G1CollectedHeap::heap()->desired_plab_sz(dest);
  size_t required_in_plab = PLAB::size_required_for_allocation(word_sz);

  // Only get a new PLAB if the allocation fits and it would not waste more than
  // ParallelGCBufferWastePct in the existing buffer.
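  // For example, assuming ParallelGCBufferWastePct at its default of 10 and a
  // desired PLAB size of 1024 words: a 64-word request triggers a retire and
  // refill (64 * 100 < 1024 * 10), while a 300-word request skips the refill
  // and goes straight to the direct allocation further down.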
  if ((required_in_plab <= plab_word_size) &&
      may_throw_away_buffer(required_in_plab, plab_word_size)) {

    G1PLAB* alloc_buf = alloc_buffer(dest, context);
    alloc_buf->retire();

    size_t actual_plab_size = 0;
    HeapWord* buf = _allocator->par_allocate_during_gc(dest,
                                                       required_in_plab,
                                                       plab_word_size,
                                                       &actual_plab_size,
                                                       context);

    assert(buf == NULL || ((actual_plab_size >= required_in_plab) && (actual_plab_size <= plab_word_size)),
           "Requested at minimum " SIZE_FORMAT ", desired " SIZE_FORMAT " words, but got " SIZE_FORMAT " at " PTR_FORMAT,
           required_in_plab, plab_word_size, actual_plab_size, p2i(buf));

    if (buf != NULL) {
      alloc_buf->set_buf(buf, actual_plab_size);

      HeapWord* const obj = alloc_buf->allocate(word_sz);
      assert(obj != NULL, "PLAB should have been big enough, tried to allocate "
                          SIZE_FORMAT " requiring " SIZE_FORMAT " PLAB size " SIZE_FORMAT,
                          word_sz, required_in_plab, plab_word_size);
      return obj;
    }
    // Otherwise.
    *plab_refill_failed = true;
  }
  // Try direct allocation.
  HeapWord* result = _allocator->par_allocate_during_gc(dest, word_sz, context);
  if (result != NULL) {
    _direct_allocated[dest.value()] += word_sz;
  }
  return result;
}

void G1PLABAllocator::undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz, AllocationContext_t context) {
  alloc_buffer(dest, context)->undo_allocation(obj, word_sz);
}

G1DefaultPLABAllocator::G1DefaultPLABAllocator(G1Allocator* allocator) :
  G1PLABAllocator(allocator),
  _surviving_alloc_buffer(_g1h->desired_plab_sz(InCSetState::Young)),
  _tenured_alloc_buffer(_g1h->desired_plab_sz(InCSetState::Old)) {
  for (uint state = 0; state < InCSetState::Num; state++) {
    _alloc_buffers[state] = NULL;
  }
  _alloc_buffers[InCSetState::Young] = &_surviving_alloc_buffer;
  _alloc_buffers[InCSetState::Old] = &_tenured_alloc_buffer;
}

void G1DefaultPLABAllocator::flush_and_retire_stats() {
  for (uint state = 0; state < InCSetState::Num; state++) {
    G1PLAB* const buf = _alloc_buffers[state];
    if (buf != NULL) {
      G1EvacStats* stats = _g1h->alloc_buffer_stats(state);
      buf->flush_and_retire_stats(stats);
      stats->add_direct_allocated(_direct_allocated[state]);
      _direct_allocated[state] = 0;
    }
  }
}

void G1DefaultPLABAllocator::waste(size_t& wasted, size_t& undo_wasted) {
  wasted = 0;
  undo_wasted = 0;
  for (uint state = 0; state < InCSetState::Num; state++) {
    G1PLAB* const buf = _alloc_buffers[state];
    if (buf != NULL) {
      wasted += buf->waste();
      undo_wasted += buf->undo_waste();
    }
  }
}

G1ArchiveAllocator* G1ArchiveAllocator::create_allocator(G1CollectedHeap* g1h) {
  // Create the archive allocator, and also enable archive object checking
  // in mark-sweep, since we will be creating archive regions.
  G1ArchiveAllocator* result = new G1ArchiveAllocator(g1h);
  G1MarkSweep::enable_archive_object_check();
  return result;
}

bool G1ArchiveAllocator::alloc_new_region() {
  // Allocate the highest free region in the reserved heap,
  // and add it to our list of allocated regions. It is marked
  // archive and added to the old set.
  HeapRegion* hr = _g1h->alloc_highest_free_region();
  if (hr == NULL) {
    return false;
  }
  assert(hr->is_empty(), "expected empty region (index %u)", hr->hrm_index());
  hr->set_archive();
  _g1h->old_set_add(hr);
  _g1h->hr_printer()->alloc(hr, G1HRPrinter::Archive);
  _allocated_regions.append(hr);
  _allocation_region = hr;

  // Set up _bottom and _max to begin allocating in the lowest
  // min_region_size'd chunk of the allocated G1 region.
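  // Archive allocation then walks the region one such chunk at a time; e.g.
  // assuming the usual 1M minimum region size, a 32M region is consumed as 32
  // consecutive 1M chunks, each one exhausted (or padded with a filler)
  // before _bottom and _max advance to the next.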
  _bottom = hr->bottom();
  _max = _bottom + HeapRegion::min_region_size_in_words();

  // Tell mark-sweep that objects in this region are not to be marked.
  G1MarkSweep::set_range_archive(MemRegion(_bottom, HeapRegion::GrainWords), true);

  // Since we've modified the old set, call update_sizes.
  _g1h->g1mm()->update_sizes();
  return true;
}

HeapWord* G1ArchiveAllocator::archive_mem_allocate(size_t word_size) {
  assert(word_size != 0, "size must not be zero");
  if (_allocation_region == NULL) {
    if (!alloc_new_region()) {
      return NULL;
    }
  }
  HeapWord* old_top = _allocation_region->top();
  assert(_bottom >= _allocation_region->bottom(),
         "inconsistent allocation state: " PTR_FORMAT " < " PTR_FORMAT,
         p2i(_bottom), p2i(_allocation_region->bottom()));
  assert(_max <= _allocation_region->end(),
         "inconsistent allocation state: " PTR_FORMAT " > " PTR_FORMAT,
         p2i(_max), p2i(_allocation_region->end()));
  assert(_bottom <= old_top && old_top <= _max,
         "inconsistent allocation state: expected "
         PTR_FORMAT " <= " PTR_FORMAT " <= " PTR_FORMAT,
         p2i(_bottom), p2i(old_top), p2i(_max));

  // Allocate the next word_size words in the current allocation chunk.
  // If allocation would cross the _max boundary, insert a filler and begin
  // at the base of the next min_region_size'd chunk. Also advance to the next
  // chunk if we don't yet cross the boundary, but the remainder would be too
  // small to fill.
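  //
  // For example, assuming a 2-word minimum filler object and 10 words left
  // below _max: a 12-word request crosses _max, so the 10 words are filled
  // and the allocation restarts at the next chunk; a 9-word request would
  // leave a 1-word tail that is too small to fill, so it also advances; an
  // 8-word request is satisfied in place, leaving a fillable 2-word tail.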
  HeapWord* new_top = old_top + word_size;
  size_t remainder = pointer_delta(_max, new_top);
  if ((new_top > _max) ||
      ((new_top < _max) && (remainder < CollectedHeap::min_fill_size()))) {
    if (old_top != _max) {
      size_t fill_size = pointer_delta(_max, old_top);
      CollectedHeap::fill_with_object(old_top, fill_size);
      _summary_bytes_used += fill_size * HeapWordSize;
    }
    _allocation_region->set_top(_max);
    old_top = _bottom = _max;

    // Check if we've just used up the last min_region_size'd chunk
    // in the current region, and if so, allocate a new one.
    if (_bottom != _allocation_region->end()) {
      _max = _bottom + HeapRegion::min_region_size_in_words();
    } else {
      if (!alloc_new_region()) {
        return NULL;
      }
      old_top = _allocation_region->bottom();
    }
  }
  _allocation_region->set_top(old_top + word_size);
  _summary_bytes_used += word_size * HeapWordSize;

  return old_top;
}

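// Finish archive allocation: if requested, pad the last region so that its
// top is aligned to end_alignment_in_bytes, then walk the allocated regions
// and append MemRegions describing the allocated address ranges (merging
// address-contiguous regions) to the caller-supplied GrowableArray.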
void G1ArchiveAllocator::complete_archive(GrowableArray<MemRegion>* ranges,
                                          size_t end_alignment_in_bytes) {
  assert((end_alignment_in_bytes >> LogHeapWordSize) < HeapRegion::min_region_size_in_words(),
         "alignment " SIZE_FORMAT " too large", end_alignment_in_bytes);
  assert(is_size_aligned(end_alignment_in_bytes, HeapWordSize),
         "alignment " SIZE_FORMAT " is not HeapWord (%u) aligned", end_alignment_in_bytes, HeapWordSize);

  // If we've allocated nothing, simply return.
  if (_allocation_region == NULL) {
    return;
  }

  // If an end alignment was requested, insert filler objects.
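  // For example, assuming a 64-bit VM and a 4K end alignment with the current
  // top 100 words past a 4K boundary, a 412-word filler object is allocated;
  // if the remaining gap were smaller than the minimum filler object, newtop
  // is first bumped up to the next aligned address.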
  if (end_alignment_in_bytes != 0) {
    HeapWord* currtop = _allocation_region->top();
    HeapWord* newtop = (HeapWord*)align_pointer_up(currtop, end_alignment_in_bytes);
    size_t fill_size = pointer_delta(newtop, currtop);
    if (fill_size != 0) {
      if (fill_size < CollectedHeap::min_fill_size()) {
        // If the required fill is smaller than we can represent,
        // bump up to the next aligned address. We know we won't exceed the current
        // region boundary because the max supported alignment is smaller than the min
        // region size, and because the allocation code never leaves space smaller than
        // the min_fill_size at the top of the current allocation region.
        newtop = (HeapWord*)align_pointer_up(currtop + CollectedHeap::min_fill_size(),
                                             end_alignment_in_bytes);
        fill_size = pointer_delta(newtop, currtop);
      }
      HeapWord* fill = archive_mem_allocate(fill_size);
      CollectedHeap::fill_with_objects(fill, fill_size);
    }
  }

  // Loop through the allocated regions, and create MemRegions summarizing
  // the allocated address range, combining contiguous ranges. Add the
  // MemRegions to the GrowableArray provided by the caller.
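  // For example, if three regions were allocated and two of them are adjacent
  // in the address space (with the lower one completely filled), a single
  // MemRegion covers both of them and a separate MemRegion covers the third.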
  int index = _allocated_regions.length() - 1;
  assert(_allocated_regions.at(index) == _allocation_region,
         "expected region %u at end of array, found %u",
         _allocation_region->hrm_index(), _allocated_regions.at(index)->hrm_index());
  HeapWord* base_address = _allocation_region->bottom();
  HeapWord* top = base_address;

  while (index >= 0) {
    HeapRegion* next = _allocated_regions.at(index);
    HeapWord* new_base = next->bottom();
    HeapWord* new_top = next->top();
    if (new_base != top) {
      ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
      base_address = new_base;
    }
    top = new_top;
    index = index - 1;
  }

  assert(top != base_address, "zero-sized range, address " PTR_FORMAT, p2i(base_address));
  ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
  _allocated_regions.clear();
  _allocation_region = NULL;
}