psYoungGen.cpp revision 9727:f944761a3ce3
1/*
2 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25#include "precompiled.hpp"
26#include "gc/parallel/mutableNUMASpace.hpp"
27#include "gc/parallel/parallelScavengeHeap.hpp"
28#include "gc/parallel/psMarkSweepDecorator.hpp"
29#include "gc/parallel/psScavenge.hpp"
30#include "gc/parallel/psYoungGen.hpp"
31#include "gc/shared/gcUtil.hpp"
32#include "gc/shared/spaceDecorator.hpp"
33#include "logging/log.hpp"
34#include "oops/oop.inline.hpp"
35#include "runtime/java.hpp"
36
// Constructor only records the sizing parameters; no memory is committed
// here.  Committing happens later in initialize_virtual_space().
PSYoungGen::PSYoungGen(size_t        initial_size,
                       size_t        min_size,
                       size_t        max_size) :
  _init_gen_size(initial_size),
  _min_gen_size(min_size),
  _max_gen_size(max_size)
{}
44
// Wrap the reserved space in a PSVirtualSpace and commit the initial
// generation size.  Failure to commit is fatal at VM startup.
void PSYoungGen::initialize_virtual_space(ReservedSpace rs, size_t alignment) {
  assert(_init_gen_size != 0, "Should have a finite size");
  _virtual_space = new PSVirtualSpace(rs, alignment);
  if (!virtual_space()->expand_by(_init_gen_size)) {
    vm_exit_during_initialization("Could not reserve enough space for "
                                  "object heap");
  }
}
53
// Two-phase initialization: the virtual space must be committed before
// initialize_work() can lay out eden and the survivor spaces inside it.
void PSYoungGen::initialize(ReservedSpace rs, size_t alignment) {
  initialize_virtual_space(rs, alignment);
  initialize_work();
}
58
// Create the spaces, mark-sweep decorators and performance counters for
// the young generation, then lay out the initial eden/survivor boundaries.
// Called once at startup, after the virtual space has been committed.
void PSYoungGen::initialize_work() {

  // The reserved region spans the whole virtual space; the committed
  // region (cmr below) may be smaller.
  _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
                        (HeapWord*)virtual_space()->high_boundary());

  // Make the card-table barrier set cover the committed part.
  MemRegion cmr((HeapWord*)virtual_space()->low(),
                (HeapWord*)virtual_space()->high());
  ParallelScavengeHeap::heap()->barrier_set()->resize_covered_region(cmr);

  if (ZapUnusedHeapArea) {
    // Mangle newly committed space immediately because it
    // can be done here more simply than after the new
    // spaces have been computed.
    SpaceMangler::mangle_region(cmr);
  }

  // Eden is NUMA-aware when UseNUMA is set; the survivor spaces are
  // always plain MutableSpaces.
  if (UseNUMA) {
    _eden_space = new MutableNUMASpace(virtual_space()->alignment());
  } else {
    _eden_space = new MutableSpace(virtual_space()->alignment());
  }
  _from_space = new MutableSpace(virtual_space()->alignment());
  _to_space   = new MutableSpace(virtual_space()->alignment());

  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL) {
    vm_exit_during_initialization("Could not allocate a young gen space");
  }

  // Allocate the mark sweep views of spaces
  _eden_mark_sweep =
      new PSMarkSweepDecorator(_eden_space, NULL, MarkSweepDeadRatio);
  _from_mark_sweep =
      new PSMarkSweepDecorator(_from_space, NULL, MarkSweepDeadRatio);
  _to_mark_sweep =
      new PSMarkSweepDecorator(_to_space, NULL, MarkSweepDeadRatio);

  if (_eden_mark_sweep == NULL ||
      _from_mark_sweep == NULL ||
      _to_mark_sweep == NULL) {
    vm_exit_during_initialization("Could not complete allocation"
                                  " of the young generation");
  }

  // Generation Counters - generation 0, 3 subspaces
  _gen_counters = new PSGenerationCounters("new", 0, 3, _min_gen_size,
                                           _max_gen_size, _virtual_space);

  // Compute maximum space sizes for performance counters
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  size_t alignment = heap->space_alignment();
  size_t size = virtual_space()->reserved_size();

  size_t max_survivor_size;
  size_t max_eden_size;

  if (UseAdaptiveSizePolicy) {
    max_survivor_size = size / MinSurvivorRatio;

    // round the survivor space size down to the nearest alignment
    // and make sure its size is greater than 0.
    max_survivor_size = align_size_down(max_survivor_size, alignment);
    max_survivor_size = MAX2(max_survivor_size, alignment);

    // set the maximum size of eden to be the size of the young gen
    // less two times the minimum survivor size. The minimum survivor
    // size for UseAdaptiveSizePolicy is one alignment.
    max_eden_size = size - 2 * alignment;
  } else {
    max_survivor_size = size / InitialSurvivorRatio;

    // round the survivor space size down to the nearest alignment
    // and make sure its size is greater than 0.
    max_survivor_size = align_size_down(max_survivor_size, alignment);
    max_survivor_size = MAX2(max_survivor_size, alignment);

    // set the maximum size of eden to be the size of the young gen
    // less two times the survivor size when the generation is 100%
    // committed. The minimum survivor size for -UseAdaptiveSizePolicy
    // is dependent on the committed portion (current capacity) of the
    // generation - the less space committed, the smaller the survivor
    // space, possibly as small as an alignment. However, we are interested
    // in the case where the young generation is 100% committed, as this
    // is the point where eden reaches its maximum size. At this point,
    // the size of a survivor space is max_survivor_size.
    max_eden_size = size - 2 * max_survivor_size;
  }

  // Per-space counters (eden + two survivor spaces) hang off _gen_counters.
  _eden_counters = new SpaceCounters("eden", 0, max_eden_size, _eden_space,
                                     _gen_counters);
  _from_counters = new SpaceCounters("s0", 1, max_survivor_size, _from_space,
                                     _gen_counters);
  _to_counters = new SpaceCounters("s1", 2, max_survivor_size, _to_space,
                                   _gen_counters);

  compute_initial_space_boundaries();
}
155
// Divide the committed portion of the generation into eden plus two
// survivor spaces according to InitialSurvivorRatio, and install those
// boundaries into the spaces.
void PSYoungGen::compute_initial_space_boundaries() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  // Compute sizes
  size_t alignment = heap->space_alignment();
  size_t size = virtual_space()->committed_size();
  assert(size >= 3 * alignment, "Young space is not large enough for eden + 2 survivors");

  size_t survivor_size = size / InitialSurvivorRatio;
  survivor_size = align_size_down(survivor_size, alignment);
  // ... but never less than an alignment
  survivor_size = MAX2(survivor_size, alignment);

  // Young generation is eden + 2 survivor spaces
  size_t eden_size = size - (2 * survivor_size);

  // Now go ahead and set 'em.
  set_space_boundaries(eden_size, survivor_size);
  space_invariants();

  // Keep the performance counters in sync with the new capacities.
  if (UsePerfData) {
    _eden_counters->update_capacity();
    _from_counters->update_capacity();
    _to_counters->update_capacity();
  }
}
182
// Lay the three spaces out contiguously in the committed virtual space:
// eden at the bottom, then to-space, then from-space, and (re)initialize
// each space over its region.
void PSYoungGen::set_space_boundaries(size_t eden_size, size_t survivor_size) {
  assert(eden_size < virtual_space()->committed_size(), "just checking");
  assert(eden_size > 0  && survivor_size > 0, "just checking");

  // Initial layout is Eden, to, from. After swapping survivor spaces,
  // that leaves us with Eden, from, to, which is step one in our two
  // step resize-with-live-data procedure.
  char *eden_start = virtual_space()->low();
  char *to_start   = eden_start + eden_size;
  char *from_start = to_start   + survivor_size;
  char *from_end   = from_start + survivor_size;

  // The three spaces must exactly tile the committed region.
  assert(from_end == virtual_space()->high(), "just checking");
  assert(is_object_aligned((intptr_t)eden_start), "checking alignment");
  assert(is_object_aligned((intptr_t)to_start),   "checking alignment");
  assert(is_object_aligned((intptr_t)from_start), "checking alignment");

  MemRegion eden_mr((HeapWord*)eden_start, (HeapWord*)to_start);
  MemRegion to_mr  ((HeapWord*)to_start, (HeapWord*)from_start);
  MemRegion from_mr((HeapWord*)from_start, (HeapWord*)from_end);

  // Clear (and optionally mangle) each space over its new region.
  eden_space()->initialize(eden_mr, true, ZapUnusedHeapArea);
    to_space()->initialize(to_mr  , true, ZapUnusedHeapArea);
  from_space()->initialize(from_mr, true, ZapUnusedHeapArea);
}
208
209#ifndef PRODUCT
// Debug-only consistency checks: minimum space sizes, ordering and
// non-overlap of eden/from/to, and agreement between the spaces and the
// underlying virtual space.  Compiled out in PRODUCT builds.
void PSYoungGen::space_invariants() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  const size_t alignment = heap->space_alignment();

  // Currently, our eden size cannot shrink to zero
  guarantee(eden_space()->capacity_in_bytes() >= alignment, "eden too small");
  guarantee(from_space()->capacity_in_bytes() >= alignment, "from too small");
  guarantee(to_space()->capacity_in_bytes() >= alignment, "to too small");

  // Relationship of spaces to each other
  char* eden_start = (char*)eden_space()->bottom();
  char* eden_end   = (char*)eden_space()->end();
  char* from_start = (char*)from_space()->bottom();
  char* from_end   = (char*)from_space()->end();
  char* to_start   = (char*)to_space()->bottom();
  char* to_end     = (char*)to_space()->end();

  guarantee(eden_start >= virtual_space()->low(), "eden bottom");
  guarantee(eden_start < eden_end, "eden space consistency");
  guarantee(from_start < from_end, "from space consistency");
  guarantee(to_start < to_end, "to space consistency");

  // Check whether from space is below to space
  // (the survivor spaces swap roles after each scavenge, so either
  // ordering is legal).
  if (from_start < to_start) {
    // Eden, from, to
    guarantee(eden_end <= from_start, "eden/from boundary");
    guarantee(from_end <= to_start,   "from/to boundary");
    guarantee(to_end <= virtual_space()->high(), "to end");
  } else {
    // Eden, to, from
    guarantee(eden_end <= to_start, "eden/to boundary");
    guarantee(to_end <= from_start, "to/from boundary");
    guarantee(from_end <= virtual_space()->high(), "from end");
  }

  // More checks that the virtual space is consistent with the spaces
  assert(virtual_space()->committed_size() >=
    (eden_space()->capacity_in_bytes() +
     to_space()->capacity_in_bytes() +
     from_space()->capacity_in_bytes()), "Committed size is inconsistent");
  assert(virtual_space()->committed_size() <= virtual_space()->reserved_size(),
    "Space invariant");
  char* eden_top = (char*)eden_space()->top();
  char* from_top = (char*)from_space()->top();
  char* to_top = (char*)to_space()->top();
  assert(eden_top <= virtual_space()->high(), "eden top");
  assert(from_top <= virtual_space()->high(), "from top");
  assert(to_top <= virtual_space()->high(), "to top");

  virtual_space()->verify();
}
261#endif
262
// Resize the generation toward the desired eden and survivor sizes:
// first grow/shrink the committed virtual space, then re-lay-out the
// spaces inside it.  Space layout is skipped if the generation resize
// fails (i.e., the virtual space could not be expanded).
void PSYoungGen::resize(size_t eden_size, size_t survivor_size) {
  // Resize the generation if needed. If the generation resize
  // reports false, do not attempt to resize the spaces.
  if (resize_generation(eden_size, survivor_size)) {
    // Then we lay out the spaces inside the generation
    resize_spaces(eden_size, survivor_size);

    space_invariants();

    log_trace(gc, ergo)("Young generation size: "
                        "desired eden: " SIZE_FORMAT " survivor: " SIZE_FORMAT
                        " used: " SIZE_FORMAT " capacity: " SIZE_FORMAT
                        " gen limits: " SIZE_FORMAT " / " SIZE_FORMAT,
                        eden_size, survivor_size, used_in_bytes(), capacity_in_bytes(),
                        _max_gen_size, min_gen_size());
  }
}
280
281
282bool PSYoungGen::resize_generation(size_t eden_size, size_t survivor_size) {
283  const size_t alignment = virtual_space()->alignment();
284  size_t orig_size = virtual_space()->committed_size();
285  bool size_changed = false;
286
287  // There used to be this guarantee there.
288  // guarantee ((eden_size + 2*survivor_size)  <= _max_gen_size, "incorrect input arguments");
289  // Code below forces this requirement.  In addition the desired eden
290  // size and desired survivor sizes are desired goals and may
291  // exceed the total generation size.
292
293  assert(min_gen_size() <= orig_size && orig_size <= max_size(), "just checking");
294
295  // Adjust new generation size
296  const size_t eden_plus_survivors =
297          align_size_up(eden_size + 2 * survivor_size, alignment);
298  size_t desired_size = MAX2(MIN2(eden_plus_survivors, max_size()),
299                             min_gen_size());
300  assert(desired_size <= max_size(), "just checking");
301
302  if (desired_size > orig_size) {
303    // Grow the generation
304    size_t change = desired_size - orig_size;
305    assert(change % alignment == 0, "just checking");
306    HeapWord* prev_high = (HeapWord*) virtual_space()->high();
307    if (!virtual_space()->expand_by(change)) {
308      return false; // Error if we fail to resize!
309    }
310    if (ZapUnusedHeapArea) {
311      // Mangle newly committed space immediately because it
312      // can be done here more simply that after the new
313      // spaces have been computed.
314      HeapWord* new_high = (HeapWord*) virtual_space()->high();
315      MemRegion mangle_region(prev_high, new_high);
316      SpaceMangler::mangle_region(mangle_region);
317    }
318    size_changed = true;
319  } else if (desired_size < orig_size) {
320    size_t desired_change = orig_size - desired_size;
321    assert(desired_change % alignment == 0, "just checking");
322
323    desired_change = limit_gen_shrink(desired_change);
324
325    if (desired_change > 0) {
326      virtual_space()->shrink_by(desired_change);
327      reset_survivors_after_shrink();
328
329      size_changed = true;
330    }
331  } else {
332    if (orig_size == gen_size_limit()) {
333      log_trace(gc)("PSYoung generation size at maximum: " SIZE_FORMAT "K", orig_size/K);
334    } else if (orig_size == min_gen_size()) {
335      log_trace(gc)("PSYoung generation size at minium: " SIZE_FORMAT "K", orig_size/K);
336    }
337  }
338
339  if (size_changed) {
340    post_resize();
341    log_trace(gc)("PSYoung generation size changed: " SIZE_FORMAT "K->" SIZE_FORMAT "K",
342                  orig_size/K, virtual_space()->committed_size()/K);
343  }
344
345  guarantee(eden_plus_survivors <= virtual_space()->committed_size() ||
346            virtual_space()->committed_size() == max_size(), "Sanity");
347
348  return true;
349}
350
351#ifndef PRODUCT
352// In the numa case eden is not mangled so a survivor space
353// moving into a region previously occupied by a survivor
354// may find an unmangled region.  Also in the PS case eden
355// to-space and from-space may not touch (i.e., there may be
356// gaps between them due to movement while resizing the
357// spaces).  Those gaps must be mangled.
358void PSYoungGen::mangle_survivors(MutableSpace* s1,
359                                  MemRegion s1MR,
360                                  MutableSpace* s2,
361                                  MemRegion s2MR) {
362  // Check eden and gap between eden and from-space, in deciding
363  // what to mangle in from-space.  Check the gap between from-space
364  // and to-space when deciding what to mangle.
365  //
366  //      +--------+   +----+    +---+
367  //      | eden   |   |s1  |    |s2 |
368  //      +--------+   +----+    +---+
369  //                 +-------+ +-----+
370  //                 |s1MR   | |s2MR |
371  //                 +-------+ +-----+
372  // All of survivor-space is properly mangled so find the
373  // upper bound on the mangling for any portion above current s1.
374  HeapWord* delta_end = MIN2(s1->bottom(), s1MR.end());
375  MemRegion delta1_left;
376  if (s1MR.start() < delta_end) {
377    delta1_left = MemRegion(s1MR.start(), delta_end);
378    s1->mangle_region(delta1_left);
379  }
380  // Find any portion to the right of the current s1.
381  HeapWord* delta_start = MAX2(s1->end(), s1MR.start());
382  MemRegion delta1_right;
383  if (delta_start < s1MR.end()) {
384    delta1_right = MemRegion(delta_start, s1MR.end());
385    s1->mangle_region(delta1_right);
386  }
387
388  // Similarly for the second survivor space except that
389  // any of the new region that overlaps with the current
390  // region of the first survivor space has already been
391  // mangled.
392  delta_end = MIN2(s2->bottom(), s2MR.end());
393  delta_start = MAX2(s2MR.start(), s1->end());
394  MemRegion delta2_left;
395  if (s2MR.start() < delta_end) {
396    delta2_left = MemRegion(s2MR.start(), delta_end);
397    s2->mangle_region(delta2_left);
398  }
399  delta_start = MAX2(s2->end(), s2MR.start());
400  MemRegion delta2_right;
401  if (delta_start < s2MR.end()) {
402    s2->mangle_region(delta2_right);
403  }
404
405  // s1
406  log_develop_trace(gc)("Current region: [" PTR_FORMAT ", " PTR_FORMAT ") "
407    "New region: [" PTR_FORMAT ", " PTR_FORMAT ")",
408    p2i(s1->bottom()), p2i(s1->end()),
409    p2i(s1MR.start()), p2i(s1MR.end()));
410  log_develop_trace(gc)("    Mangle before: [" PTR_FORMAT ", "
411    PTR_FORMAT ")  Mangle after: [" PTR_FORMAT ", " PTR_FORMAT ")",
412    p2i(delta1_left.start()), p2i(delta1_left.end()),
413    p2i(delta1_right.start()), p2i(delta1_right.end()));
414
415  // s2
416  log_develop_trace(gc)("Current region: [" PTR_FORMAT ", " PTR_FORMAT ") "
417    "New region: [" PTR_FORMAT ", " PTR_FORMAT ")",
418    p2i(s2->bottom()), p2i(s2->end()),
419    p2i(s2MR.start()), p2i(s2MR.end()));
420  log_develop_trace(gc)("    Mangle before: [" PTR_FORMAT ", "
421    PTR_FORMAT ")  Mangle after: [" PTR_FORMAT ", " PTR_FORMAT ")",
422    p2i(delta2_left.start()), p2i(delta2_left.end()),
423    p2i(delta2_right.start()), p2i(delta2_right.end()));
424}
425#endif // NOT PRODUCT
426
// Re-lay-out eden, from-space and to-space inside the (already resized)
// committed region to approach the requested sizes.  from-space contains
// live data and is never moved; eden and to-space are repositioned around
// it.  Only valid with UseAdaptiveSizePolicy, and only when eden and
// to-space are empty.  The requested sizes are goals and may not be
// attainable.
void PSYoungGen::resize_spaces(size_t requested_eden_size,
                               size_t requested_survivor_size) {
  assert(UseAdaptiveSizePolicy, "sanity check");
  assert(requested_eden_size > 0  && requested_survivor_size > 0,
         "just checking");

  // We require eden and to space to be empty
  if ((!eden_space()->is_empty()) || (!to_space()->is_empty())) {
    return;
  }

  log_trace(gc, ergo)("PSYoungGen::resize_spaces(requested_eden_size: " SIZE_FORMAT ", requested_survivor_size: " SIZE_FORMAT ")",
                      requested_eden_size, requested_survivor_size);
  log_trace(gc, ergo)("    eden: [" PTR_FORMAT ".." PTR_FORMAT ") " SIZE_FORMAT,
                      p2i(eden_space()->bottom()),
                      p2i(eden_space()->end()),
                      pointer_delta(eden_space()->end(),
                                    eden_space()->bottom(),
                                    sizeof(char)));
  log_trace(gc, ergo)("    from: [" PTR_FORMAT ".." PTR_FORMAT ") " SIZE_FORMAT,
                      p2i(from_space()->bottom()),
                      p2i(from_space()->end()),
                      pointer_delta(from_space()->end(),
                                    from_space()->bottom(),
                                    sizeof(char)));
  log_trace(gc, ergo)("      to: [" PTR_FORMAT ".." PTR_FORMAT ") " SIZE_FORMAT,
                      p2i(to_space()->bottom()),
                      p2i(to_space()->end()),
                      pointer_delta(  to_space()->end(),
                                      to_space()->bottom(),
                                      sizeof(char)));

  // There's nothing to do if the new sizes are the same as the current
  if (requested_survivor_size == to_space()->capacity_in_bytes() &&
      requested_survivor_size == from_space()->capacity_in_bytes() &&
      requested_eden_size == eden_space()->capacity_in_bytes()) {
    log_trace(gc, ergo)("    capacities are the right sizes, returning");
    return;
  }

  char* eden_start = (char*)eden_space()->bottom();
  char* eden_end   = (char*)eden_space()->end();
  char* from_start = (char*)from_space()->bottom();
  char* from_end   = (char*)from_space()->end();
  char* to_start   = (char*)to_space()->bottom();
  char* to_end     = (char*)to_space()->end();

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  const size_t alignment = heap->space_alignment();
  // If the requested total is no larger than the generation minimum, eden
  // may be made larger than requested so the minimum is maintained.
  const bool maintain_minimum =
    (requested_eden_size + 2 * requested_survivor_size) <= min_gen_size();

  bool eden_from_to_order = from_start < to_start;
  // Check whether from space is below to space
  if (eden_from_to_order) {
    // Eden, from, to
    // NOTE(review): this assignment is redundant (the flag is already
    // true on this branch); kept as-is.
    eden_from_to_order = true;
    log_trace(gc, ergo)("  Eden, from, to:");

    // Set eden
    // "requested_eden_size" is a goal for the size of eden
    // and may not be attainable.  "eden_size" below is
    // calculated based on the location of from-space and
    // the goal for the size of eden.  from-space is
    // fixed in place because it contains live data.
    // The calculation is done this way to avoid 32bit
    // overflow (i.e., eden_start + requested_eden_size
    // may too large for representation in 32bits).
    size_t eden_size;
    if (maintain_minimum) {
      // Only make eden larger than the requested size if
      // the minimum size of the generation has to be maintained.
      // This could be done in general but policy at a higher
      // level is determining a requested size for eden and that
      // should be honored unless there is a fundamental reason.
      eden_size = pointer_delta(from_start,
                                eden_start,
                                sizeof(char));
    } else {
      eden_size = MIN2(requested_eden_size,
                       pointer_delta(from_start, eden_start, sizeof(char)));
    }

    eden_end = eden_start + eden_size;
    assert(eden_end >= eden_start, "addition overflowed");

    // To may resize into from space as long as it is clear of live data.
    // From space must remain page aligned, though, so we need to do some
    // extra calculations.

    // First calculate an optimal to-space
    to_end   = (char*)virtual_space()->high();
    to_start = (char*)pointer_delta(to_end, (char*)requested_survivor_size,
                                    sizeof(char));

    // Does the optimal to-space overlap from-space?
    if (to_start < (char*)from_space()->end()) {
      // Calculate the minimum offset possible for from_end
      size_t from_size = pointer_delta(from_space()->top(), from_start, sizeof(char));

      // Should we be in this method if from_space is empty? Why not the set_space method? FIX ME!
      if (from_size == 0) {
        from_size = alignment;
      } else {
        from_size = align_size_up(from_size, alignment);
      }

      from_end = from_start + from_size;
      assert(from_end > from_start, "addition overflow or from_size problem");

      guarantee(from_end <= (char*)from_space()->end(), "from_end moved to the right");

      // Now update to_start with the new from_end
      to_start = MAX2(from_end, to_start);
    }

    guarantee(to_start != to_end, "to space is zero sized");

    log_trace(gc, ergo)("    [eden_start .. eden_end): [" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                        p2i(eden_start),
                        p2i(eden_end),
                        pointer_delta(eden_end, eden_start, sizeof(char)));
    log_trace(gc, ergo)("    [from_start .. from_end): [" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                        p2i(from_start),
                        p2i(from_end),
                        pointer_delta(from_end, from_start, sizeof(char)));
    log_trace(gc, ergo)("    [  to_start ..   to_end): [" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                        p2i(to_start),
                        p2i(to_end),
                        pointer_delta(  to_end,   to_start, sizeof(char)));
  } else {
    // Eden, to, from
    log_trace(gc, ergo)("  Eden, to, from:");

    // To space gets priority over eden resizing. Note that we position
    // to space as if we were able to resize from space, even though from
    // space is not modified.
    // Giving eden priority was tried and gave poorer performance.
    to_end   = (char*)pointer_delta(virtual_space()->high(),
                                    (char*)requested_survivor_size,
                                    sizeof(char));
    to_end   = MIN2(to_end, from_start);
    to_start = (char*)pointer_delta(to_end, (char*)requested_survivor_size,
                                    sizeof(char));
    // if the space sizes are to be increased by several times then
    // 'to_start' will point beyond the young generation. In this case
    // 'to_start' should be adjusted.
    to_start = MAX2(to_start, eden_start + alignment);

    // Compute how big eden can be, then adjust end.
    // See  comments above on calculating eden_end.
    size_t eden_size;
    if (maintain_minimum) {
      eden_size = pointer_delta(to_start, eden_start, sizeof(char));
    } else {
      eden_size = MIN2(requested_eden_size,
                       pointer_delta(to_start, eden_start, sizeof(char)));
    }
    eden_end = eden_start + eden_size;
    assert(eden_end >= eden_start, "addition overflowed");

    // Could choose to not let eden shrink
    // to_start = MAX2(to_start, eden_end);

    // Don't let eden shrink down to 0 or less.
    eden_end = MAX2(eden_end, eden_start + alignment);
    to_start = MAX2(to_start, eden_end);

    log_trace(gc, ergo)("    [eden_start .. eden_end): [" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                        p2i(eden_start),
                        p2i(eden_end),
                        pointer_delta(eden_end, eden_start, sizeof(char)));
    log_trace(gc, ergo)("    [  to_start ..   to_end): [" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                        p2i(to_start),
                        p2i(to_end),
                        pointer_delta(  to_end,   to_start, sizeof(char)));
    log_trace(gc, ergo)("    [from_start .. from_end): [" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                        p2i(from_start),
                        p2i(from_end),
                        pointer_delta(from_end, from_start, sizeof(char)));
  }


  // from-space may only have grown outward and must still cover its live data.
  guarantee((HeapWord*)from_start <= from_space()->bottom(),
            "from start moved to the right");
  guarantee((HeapWord*)from_end >= from_space()->top(),
            "from end moved into live data");
  assert(is_object_aligned((intptr_t)eden_start), "checking alignment");
  assert(is_object_aligned((intptr_t)from_start), "checking alignment");
  assert(is_object_aligned((intptr_t)to_start), "checking alignment");

  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)eden_end);
  MemRegion toMR  ((HeapWord*)to_start,   (HeapWord*)to_end);
  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)from_end);

  // Let's make sure the call to initialize doesn't reset "top"!
  HeapWord* old_from_top = from_space()->top();

  // For logging block  below
  size_t old_from = from_space()->capacity_in_bytes();
  size_t old_to   = to_space()->capacity_in_bytes();

  if (ZapUnusedHeapArea) {
    // NUMA is a special case because a numa space is not mangled
    // in order to not prematurely bind its address to memory to
    // the wrong memory (i.e., don't want the GC thread to first
    // touch the memory).  The survivor spaces are not numa
    // spaces and are mangled.
    if (UseNUMA) {
      if (eden_from_to_order) {
        mangle_survivors(from_space(), fromMR, to_space(), toMR);
      } else {
        mangle_survivors(to_space(), toMR, from_space(), fromMR);
      }
    }

    // If not mangling the spaces, do some checking to verify that
    // the spaces are already mangled.
    // The spaces should be correctly mangled at this point so
    // do some checking here. Note that they are not being mangled
    // in the calls to initialize().
    // Must check mangling before the spaces are reshaped.  Otherwise,
    // the bottom or end of one space may have moved into an area
    // covered by another space and a failure of the check may
    // not correctly indicate which space is not properly mangled.
    HeapWord* limit = (HeapWord*) virtual_space()->high();
    eden_space()->check_mangled_unused_area(limit);
    from_space()->check_mangled_unused_area(limit);
      to_space()->check_mangled_unused_area(limit);
  }
  // When an existing space is being initialized, it is not
  // mangled because the space has been previously mangled.
  eden_space()->initialize(edenMR,
                           SpaceDecorator::Clear,
                           SpaceDecorator::DontMangle);
    to_space()->initialize(toMR,
                           SpaceDecorator::Clear,
                           SpaceDecorator::DontMangle);
  from_space()->initialize(fromMR,
                           SpaceDecorator::DontClear,
                           SpaceDecorator::DontMangle);

  assert(from_space()->top() == old_from_top, "from top changed!");

  log_trace(gc, ergo)("AdaptiveSizePolicy::survivor space sizes: collection: %d (" SIZE_FORMAT ", " SIZE_FORMAT ") -> (" SIZE_FORMAT ", " SIZE_FORMAT ") ",
                      ParallelScavengeHeap::heap()->total_collections(),
                      old_from, old_to,
                      from_space()->capacity_in_bytes(),
                      to_space()->capacity_in_bytes());
}
677
678void PSYoungGen::swap_spaces() {
679  MutableSpace* s    = from_space();
680  _from_space        = to_space();
681  _to_space          = s;
682
683  // Now update the decorators.
684  PSMarkSweepDecorator* md = from_mark_sweep();
685  _from_mark_sweep           = to_mark_sweep();
686  _to_mark_sweep             = md;
687
688  assert(from_mark_sweep()->space() == from_space(), "Sanity");
689  assert(to_mark_sweep()->space() == to_space(), "Sanity");
690}
691
692size_t PSYoungGen::capacity_in_bytes() const {
693  return eden_space()->capacity_in_bytes()
694       + from_space()->capacity_in_bytes();  // to_space() is only used during scavenge
695}
696
697
698size_t PSYoungGen::used_in_bytes() const {
699  return eden_space()->used_in_bytes()
700       + from_space()->used_in_bytes();      // to_space() is only used during scavenge
701}
702
703
704size_t PSYoungGen::free_in_bytes() const {
705  return eden_space()->free_in_bytes()
706       + from_space()->free_in_bytes();      // to_space() is only used during scavenge
707}
708
709size_t PSYoungGen::capacity_in_words() const {
710  return eden_space()->capacity_in_words()
711       + from_space()->capacity_in_words();  // to_space() is only used during scavenge
712}
713
714
715size_t PSYoungGen::used_in_words() const {
716  return eden_space()->used_in_words()
717       + from_space()->used_in_words();      // to_space() is only used during scavenge
718}
719
720
721size_t PSYoungGen::free_in_words() const {
722  return eden_space()->free_in_words()
723       + from_space()->free_in_words();      // to_space() is only used during scavenge
724}
725
726void PSYoungGen::object_iterate(ObjectClosure* blk) {
727  eden_space()->object_iterate(blk);
728  from_space()->object_iterate(blk);
729  to_space()->object_iterate(blk);
730}
731
732void PSYoungGen::precompact() {
733  eden_mark_sweep()->precompact();
734  from_mark_sweep()->precompact();
735  to_mark_sweep()->precompact();
736}
737
738void PSYoungGen::adjust_pointers() {
739  eden_mark_sweep()->adjust_pointers();
740  from_mark_sweep()->adjust_pointers();
741  to_mark_sweep()->adjust_pointers();
742}
743
744void PSYoungGen::compact() {
745  eden_mark_sweep()->compact(ZapUnusedHeapArea);
746  from_mark_sweep()->compact(ZapUnusedHeapArea);
747  // Mark sweep stores preserved markOops in to space, don't disturb!
748  to_mark_sweep()->compact(false);
749}
750
751void PSYoungGen::print() const { print_on(tty); }
752void PSYoungGen::print_on(outputStream* st) const {
753  st->print(" %-15s", "PSYoungGen");
754  st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
755             capacity_in_bytes()/K, used_in_bytes()/K);
756  virtual_space()->print_space_boundaries_on(st);
757  st->print("  eden"); eden_space()->print_on(st);
758  st->print("  from"); from_space()->print_on(st);
759  st->print("  to  "); to_space()->print_on(st);
760}
761
762// Note that a space is not printed before the [NAME:
763void PSYoungGen::print_used_change(size_t prev_used) const {
764  log_info(gc, heap)("%s: "  SIZE_FORMAT "K->" SIZE_FORMAT "K("  SIZE_FORMAT "K)",
765      name(), prev_used / K, used_in_bytes() / K, capacity_in_bytes() / K);
766}
767
// Not supported by this generation: guarded by ShouldNotReachHere().
// The return is only present to satisfy the signature.
size_t PSYoungGen::available_for_expansion() {
  ShouldNotReachHere();
  return 0;
}
772
// Not supported by this generation: guarded by ShouldNotReachHere().
// The return is only present to satisfy the signature.
size_t PSYoungGen::available_for_contraction() {
  ShouldNotReachHere();
  return 0;
}
777
778size_t PSYoungGen::available_to_min_gen() {
779  assert(virtual_space()->committed_size() >= min_gen_size(), "Invariant");
780  return virtual_space()->committed_size() - min_gen_size();
781}
782
// This method assumes that from-space has live data and that
// any shrinkage of the young gen is limited by location of
// from-space.
size_t PSYoungGen::available_to_live() {
  size_t delta_in_survivor = 0;
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  const size_t space_alignment = heap->space_alignment();
  const size_t gen_alignment = heap->generation_alignment();

  // The survivor space that ends higher in the virtual space is the
  // one that limits shrinking, since the generation shrinks from the
  // high end.
  MutableSpace* space_shrinking = NULL;
  if (from_space()->end() > to_space()->end()) {
    space_shrinking = from_space();
  } else {
    space_shrinking = to_space();
  }

  // Include any space that is committed but not included in
  // the survivor spaces.
  assert(((HeapWord*)virtual_space()->high()) >= space_shrinking->end(),
    "Survivor space beyond high end");
  size_t unused_committed = pointer_delta(virtual_space()->high(),
    space_shrinking->end(), sizeof(char));

  if (space_shrinking->is_empty()) {
    // Don't let the space shrink to 0
    assert(space_shrinking->capacity_in_bytes() >= space_alignment,
      "Space is too small");
    delta_in_survivor = space_shrinking->capacity_in_bytes() - space_alignment;
  } else {
    // Only the unused tail of the space (end - top) is reclaimable.
    delta_in_survivor = pointer_delta(space_shrinking->end(),
                                      space_shrinking->top(),
                                      sizeof(char));
  }

  // Round the total down to the generation alignment before returning.
  size_t delta_in_bytes = unused_committed + delta_in_survivor;
  delta_in_bytes = align_size_down(delta_in_bytes, gen_alignment);
  return delta_in_bytes;
}
821
822// Return the number of bytes available for resizing down the young
823// generation.  This is the minimum of
824//      input "bytes"
825//      bytes to the minimum young gen size
826//      bytes to the size currently being used + some small extra
827size_t PSYoungGen::limit_gen_shrink(size_t bytes) {
828  // Allow shrinkage into the current eden but keep eden large enough
829  // to maintain the minimum young gen size
830  bytes = MIN3(bytes, available_to_min_gen(), available_to_live());
831  return align_size_down(bytes, virtual_space()->alignment());
832}
833
// Not supported by this generation: guarded by ShouldNotReachHere().
void PSYoungGen::reset_after_change() {
  ShouldNotReachHere();
}
837
// After the virtual space has been shrunk, rebuild the reserved
// region, inform the reference processor of the new span, and clip
// the higher survivor space to the new high end if it now extends
// past it.
void PSYoungGen::reset_survivors_after_shrink() {
  _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
                        (HeapWord*)virtual_space()->high_boundary());
  PSScavenge::reference_processor()->set_span(_reserved);

  // The survivor space that ends higher in the virtual space is the
  // one affected by a shrink of the high end.
  MutableSpace* space_shrinking = NULL;
  if (from_space()->end() > to_space()->end()) {
    space_shrinking = from_space();
  } else {
    space_shrinking = to_space();
  }

  HeapWord* new_end = (HeapWord*)virtual_space()->high();
  assert(new_end >= space_shrinking->bottom(), "Shrink was too large");
  // Was there a shrink of the survivor space?
  if (new_end < space_shrinking->end()) {
    // Re-initialize with the same bottom but the new, lower end;
    // keep the contents (DontClear) but mangle the freed tail.
    MemRegion mr(space_shrinking->bottom(), new_end);
    space_shrinking->initialize(mr,
                                SpaceDecorator::DontClear,
                                SpaceDecorator::Mangle);
  }
}
860
// This method currently does not expect to expand into eden (i.e.,
// the virtual space boundaries is expected to be consistent
// with the eden boundaries..
// Called after a resize: grows/shrinks the barrier set's covered
// region to match the committed part of the virtual space and then
// re-checks the space invariants.
void PSYoungGen::post_resize() {
  assert_locked_or_safepoint(Heap_lock);
  assert((eden_space()->bottom() < to_space()->bottom()) &&
         (eden_space()->bottom() < from_space()->bottom()),
         "Eden is assumed to be below the survivor spaces");

  MemRegion cmr((HeapWord*)virtual_space()->low(),
                (HeapWord*)virtual_space()->high());
  ParallelScavengeHeap::heap()->barrier_set()->resize_covered_region(cmr);
  space_invariants();
}
875
876
877
878void PSYoungGen::update_counters() {
879  if (UsePerfData) {
880    _eden_counters->update_all();
881    _from_counters->update_all();
882    _to_counters->update_all();
883    _gen_counters->update_all();
884  }
885}
886
887void PSYoungGen::verify() {
888  eden_space()->verify();
889  from_space()->verify();
890  to_space()->verify();
891}
892
893#ifndef PRODUCT
894void PSYoungGen::record_spaces_top() {
895  assert(ZapUnusedHeapArea, "Not mangling unused space");
896  eden_space()->set_top_for_allocations();
897  from_space()->set_top_for_allocations();
898  to_space()->set_top_for_allocations();
899}
900#endif
901