space.cpp revision 9056:dc9930a04ab0
/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc/serial/defNewGeneration.hpp"
#include "gc/shared/blockOffsetTable.inline.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/liveRange.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/space.inline.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "memory/universe.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"

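// Compute the actual end of the dirty card range to scan: if top_obj
// is an object that extends past the dirty card (and is not an array,
// for which stores are tracked precisely), the scan must continue to
// the end of that object.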
HeapWord* DirtyCardToOopClosure::get_actual_top(HeapWord* top,
                                                HeapWord* top_obj) {
  if (top_obj != NULL) {
    if (_sp->block_is_obj(top_obj)) {
      if (_precision == CardTableModRefBS::ObjHeadPreciseArray) {
        if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
          // An arrayOop is starting on the dirty card - since we do exact
          // store checks for objArrays we are done.
        } else {
          // Otherwise, it is possible that the object starting on the dirty
          // card spans the entire card, and that the store happened on a
          // later card.  Figure out where the object ends.
          // Use the block_size() method of the space over which
          // the iteration is being done.  That space (e.g. CMS) may have
          // specific requirements on object sizes which will
          // be reflected in the block_size() method.
          top = top_obj + oop(top_obj)->size();
        }
      }
    } else {
      top = top_obj;
    }
  } else {
    assert(top == _sp->end(), "only case where top_obj == NULL");
  }
  return top;
}

void DirtyCardToOopClosure::walk_mem_region(MemRegion mr,
                                            HeapWord* bottom,
                                            HeapWord* top) {
  // 1. Blocks may or may not be objects.
  // 2. Even when a block_is_obj(), it may not entirely
  //    occupy the block if the block quantum is larger than
  //    the object size.
  // We can and should try to optimize by calling the non-MemRegion
  // version of oop_iterate() for all but the extremal objects
  // (for which we need to call the MemRegion version of
  // oop_iterate()).  To be done post-beta XXX
  for (; bottom < top; bottom += _sp->block_size(bottom)) {
    // As in the case of contiguous space above, we'd like to
    // just use the value returned by oop_iterate to increment the
    // current pointer; unfortunately, that won't work in CMS because
    // we'd need an interface change (it seems) to have the space
    // "adjust the object size" (for instance pad it up to its
    // block alignment or minimum block size restrictions). XXX
    if (_sp->block_is_obj(bottom) &&
        !_sp->obj_allocated_since_save_marks(oop(bottom))) {
      oop(bottom)->oop_iterate(_cl, mr);
    }
  }
}

// We get called with "mr" representing the dirty region
// that we want to process. Because of imprecise marking,
// we may need to extend the incoming "mr" to the right,
// and scan more. However, because we may already have
// scanned some of that extended region, we may need to
// trim back its right end so we do not re-scan what we
// (or another worker thread) have already scanned or are
// planning to scan.
void DirtyCardToOopClosure::do_MemRegion(MemRegion mr) {

  // Some collectors need to do special things whenever their dirty
  // cards are processed. For instance, CMS must remember mutator updates
  // (i.e. dirty cards) so as to re-scan mutated objects.
  // Such work can be piggy-backed here on dirty card scanning, so as to make
  // it slightly more efficient than doing a complete non-destructive pre-scan
  // of the card table.
  MemRegionClosure* pCl = _sp->preconsumptionDirtyCardClosure();
  if (pCl != NULL) {
    pCl->do_MemRegion(mr);
  }

  HeapWord* bottom = mr.start();
  HeapWord* last = mr.last();
  HeapWord* top = mr.end();
  HeapWord* bottom_obj;
  HeapWord* top_obj;

  assert(_precision == CardTableModRefBS::ObjHeadPreciseArray ||
         _precision == CardTableModRefBS::Precise,
         "Only ones we deal with for now.");

  assert(_precision != CardTableModRefBS::ObjHeadPreciseArray ||
         _cl->idempotent() || _last_bottom == NULL ||
         top <= _last_bottom,
         "Not decreasing");
  NOT_PRODUCT(_last_bottom = mr.start());

  bottom_obj = _sp->block_start(bottom);
  top_obj    = _sp->block_start(last);

  assert(bottom_obj <= bottom, "just checking");
  assert(top_obj    <= top,    "just checking");

  // Given what we think is the top of the memory region and
  // the start of the object at the top, get the actual
  // value of the top.
  top = get_actual_top(top, top_obj);

  // If the previous call did some part of this region, don't redo.
  if (_precision == CardTableModRefBS::ObjHeadPreciseArray &&
      _min_done != NULL &&
      _min_done < top) {
    top = _min_done;
  }

  // Top may have been reset, and in fact may be below bottom,
  // e.g. the dirty card region is entirely in a now free object
  // -- something that could happen with a concurrent sweeper.
  bottom = MIN2(bottom, top);
  MemRegion extended_mr = MemRegion(bottom, top);
  assert(bottom <= top &&
         (_precision != CardTableModRefBS::ObjHeadPreciseArray ||
          _min_done == NULL ||
          top <= _min_done),
         "overlap!");

  // Walk the region if it is not empty; otherwise there is nothing to do.
  if (!extended_mr.is_empty()) {
    walk_mem_region(extended_mr, bottom_obj, top);
  }

  // An idempotent closure might be applied in any order, so we don't
  // record a _min_done for it.
  if (!_cl->idempotent()) {
    _min_done = bottom;
  } else {
    assert(_min_done == _last_explicit_min_done,
           "Don't update _min_done for idempotent cl");
  }
}

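// Create the generic dirty-card-to-oop closure for this space. The
// "parallel" argument is part of the interface but is not used by
// this generic version.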
DirtyCardToOopClosure* Space::new_dcto_cl(ExtendedOopClosure* cl,
                                          CardTableModRefBS::PrecisionStyle precision,
                                          HeapWord* boundary,
                                          bool parallel) {
  return new DirtyCardToOopClosure(this, cl, precision, boundary);
}

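// The contiguous-space version can additionally clamp the scan to the
// space's top(), since anything at or beyond top() is unallocated.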
HeapWord* ContiguousSpaceDCTOC::get_actual_top(HeapWord* top,
                                               HeapWord* top_obj) {
  if (top_obj != NULL && top_obj < (_sp->toContiguousSpace())->top()) {
    if (_precision == CardTableModRefBS::ObjHeadPreciseArray) {
      if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
        // An arrayOop is starting on the dirty card - since we do exact
        // store checks for objArrays we are done.
      } else {
        // Otherwise, it is possible that the object starting on the dirty
        // card spans the entire card, and that the store happened on a
        // later card.  Figure out where the object ends.
        assert(_sp->block_size(top_obj) == (size_t) oop(top_obj)->size(),
               "Block size and object size mismatch");
        top = top_obj + oop(top_obj)->size();
      }
    }
  } else {
    top = (_sp->toContiguousSpace())->top();
  }
  return top;
}

void Filtering_DCTOC::walk_mem_region(MemRegion mr,
                                      HeapWord* bottom,
                                      HeapWord* top) {
  // Note that this assumption won't hold if we have a concurrent
  // collector in this space, which may have freed up objects after
  // they were dirtied and before the stop-the-world GC that is
  // examining cards here.
  assert(bottom < top, "ought to be at least one obj on a dirty card.");

  if (_boundary != NULL) {
    // We have a boundary outside of which we don't want to look
    // at objects, so create a filtering closure around the
    // oop closure before walking the region.
    FilteringClosure filter(_boundary, _cl);
    walk_mem_region_with_cl(mr, bottom, top, &filter);
  } else {
    // No boundary, simply walk the heap with the oop closure.
    walk_mem_region_with_cl(mr, bottom, top, _cl);
  }
}

// We must replicate this so that the static type of "FilteringClosure"
// (see above) is apparent at the oop_iterate calls.
#define ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \
void ContiguousSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr,        \
                                                   HeapWord* bottom,    \
                                                   HeapWord* top,       \
                                                   ClosureType* cl) {   \
  bottom += oop(bottom)->oop_iterate_size(cl, mr);                      \
  if (bottom < top) {                                                   \
    HeapWord* next_obj = bottom + oop(bottom)->size();                  \
    while (next_obj < top) {                                            \
      /* Bottom lies entirely below top, so we can call the */          \
      /* non-memRegion version of oop_iterate below. */                 \
      oop(bottom)->oop_iterate(cl);                                     \
      bottom = next_obj;                                                \
      next_obj = bottom + oop(bottom)->size();                          \
    }                                                                   \
    /* Last object. */                                                  \
    oop(bottom)->oop_iterate(cl, mr);                                   \
  }                                                                     \
}

// (There are only two of these, rather than N, because the split is due
// only to the introduction of the FilteringClosure, a local part of the
// impl of this abstraction.)
ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)

DirtyCardToOopClosure*
ContiguousSpace::new_dcto_cl(ExtendedOopClosure* cl,
                             CardTableModRefBS::PrecisionStyle precision,
                             HeapWord* boundary,
                             bool parallel) {
  return new ContiguousSpaceDCTOC(this, cl, precision, boundary);
}

void Space::initialize(MemRegion mr,
                       bool clear_space,
                       bool mangle_space) {
  HeapWord* bottom = mr.start();
  HeapWord* end    = mr.end();
  assert(Universe::on_page_boundary(bottom) && Universe::on_page_boundary(end),
         "invalid space boundaries");
  set_bottom(bottom);
  set_end(end);
  if (clear_space) clear(mangle_space);
}

void Space::clear(bool mangle_space) {
  if (ZapUnusedHeapArea && mangle_space) {
    mangle_unused_area();
  }
}

ContiguousSpace::ContiguousSpace(): CompactibleSpace(), _top(NULL),
    _concurrent_iteration_safe_limit(NULL) {
  _mangler = new GenSpaceMangler(this);
}

ContiguousSpace::~ContiguousSpace() {
  delete _mangler;
}

void ContiguousSpace::initialize(MemRegion mr,
                                 bool clear_space,
                                 bool mangle_space)
{
  CompactibleSpace::initialize(mr, clear_space, mangle_space);
  set_concurrent_iteration_safe_limit(top());
}

void ContiguousSpace::clear(bool mangle_space) {
  set_top(bottom());
  set_saved_mark();
  CompactibleSpace::clear(mangle_space);
}

bool ContiguousSpace::is_free_block(const HeapWord* p) const {
  return p >= _top;
}

void OffsetTableContigSpace::clear(bool mangle_space) {
  ContiguousSpace::clear(mangle_space);
  _offsets.initialize_threshold();
}

void OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
  Space::set_bottom(new_bottom);
  _offsets.set_bottom(new_bottom);
}

void OffsetTableContigSpace::set_end(HeapWord* new_end) {
  // Space should not advertise an increase in size
  // until after the underlying offset table has been enlarged.
  _offsets.resize(pointer_delta(new_end, bottom()));
  Space::set_end(new_end);
}

#ifndef PRODUCT

void ContiguousSpace::set_top_for_allocations(HeapWord* v) {
  mangler()->set_top_for_allocations(v);
}
void ContiguousSpace::set_top_for_allocations() {
  mangler()->set_top_for_allocations(top());
}
void ContiguousSpace::check_mangled_unused_area(HeapWord* limit) {
  mangler()->check_mangled_unused_area(limit);
}

void ContiguousSpace::check_mangled_unused_area_complete() {
  mangler()->check_mangled_unused_area_complete();
}

// Mangle only the unused space that has not previously
// been mangled and that has not been allocated since being
// mangled.
void ContiguousSpace::mangle_unused_area() {
  mangler()->mangle_unused_area();
}
void ContiguousSpace::mangle_unused_area_complete() {
  mangler()->mangle_unused_area_complete();
}
#endif  // NOT_PRODUCT

void CompactibleSpace::initialize(MemRegion mr,
                                  bool clear_space,
                                  bool mangle_space) {
  Space::initialize(mr, clear_space, mangle_space);
  set_compaction_top(bottom());
  _next_compaction_space = NULL;
}

void CompactibleSpace::clear(bool mangle_space) {
  Space::clear(mangle_space);
  _compaction_top = bottom();
}

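// Compute the post-compaction destination of live object q and record
// a forwarding pointer to it in q's mark word. If q does not fit in
// the current compaction space, advance to the next compaction space
// (falling back to the young generation once the list is exhausted).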
HeapWord* CompactibleSpace::forward(oop q, size_t size,
                                    CompactPoint* cp, HeapWord* compact_top) {
  // q is alive
  // First check if we should switch compaction space
  assert(this == cp->space, "'this' should be current compaction space.");
  size_t compaction_max_size = pointer_delta(end(), compact_top);
  while (size > compaction_max_size) {
    // switch to next compaction space
    cp->space->set_compaction_top(compact_top);
    cp->space = cp->space->next_compaction_space();
    if (cp->space == NULL) {
      cp->gen = GenCollectedHeap::heap()->young_gen();
      assert(cp->gen != NULL, "compaction must succeed");
      cp->space = cp->gen->first_compaction_space();
      assert(cp->space != NULL, "generation must have a first compaction space");
    }
    compact_top = cp->space->bottom();
    cp->space->set_compaction_top(compact_top);
    cp->threshold = cp->space->initialize_threshold();
    compaction_max_size = pointer_delta(cp->space->end(), compact_top);
  }

  // store the forwarding pointer into the mark word
  if ((HeapWord*)q != compact_top) {
    q->forward_to(oop(compact_top));
    assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
  } else {
    // if the object isn't moving we can just set the mark to the default
    // mark and handle it specially later on.
    q->init_mark();
    assert(q->forwardee() == NULL, "should be forwarded to NULL");
  }

  compact_top += size;

  // we need to update the offset table so that the beginnings of objects can be
  // found during scavenge.  Note that we are updating the offset table based on
  // where the object will be once the compaction phase finishes.
  if (compact_top > cp->threshold)
    cp->threshold =
      cp->space->cross_threshold(compact_top - size, compact_top);
  return compact_top;
}

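// Dead-space retention: rather than compact away a short run of dead
// heap words, overwrite it with a filler object and mark that object
// live, charging its length against the allowed_deadspace_words budget.
// Returns true if the dead range starting at q was retained this way.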
bool CompactibleSpace::insert_deadspace(size_t& allowed_deadspace_words,
                                        HeapWord* q, size_t deadlength) {
  if (allowed_deadspace_words >= deadlength) {
    allowed_deadspace_words -= deadlength;
    CollectedHeap::fill_with_object(q, deadlength);
    oop(q)->set_mark(oop(q)->mark()->set_marked());
    assert((int) deadlength == oop(q)->size(), "bad filler object size");
    // Recall that we required "q == compaction_top".
    return true;
  } else {
    allowed_deadspace_words = 0;
    return false;
  }
}

void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
  scan_and_forward(this, cp);
}

void CompactibleSpace::adjust_pointers() {
  // Check first if there is any work to do.
  if (used() == 0) {
    return;   // Nothing to do.
  }

  scan_and_adjust_pointers(this);
}

void CompactibleSpace::compact() {
  scan_and_compact(this);
}

void Space::print_short() const { print_short_on(tty); }

void Space::print_short_on(outputStream* st) const {
  st->print(" space " SIZE_FORMAT "K, %3d%% used", capacity() / K,
              (int) ((double) used() * 100 / capacity()));
}

void Space::print() const { print_on(tty); }

void Space::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ")",
                p2i(bottom()), p2i(end()));
}

void ContiguousSpace::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
                p2i(bottom()), p2i(top()), p2i(end()));
}

void OffsetTableContigSpace::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
                INTPTR_FORMAT ", " INTPTR_FORMAT ")",
              p2i(bottom()), p2i(top()), p2i(_offsets.threshold()), p2i(end()));
}

void ContiguousSpace::verify() const {
  HeapWord* p = bottom();
  HeapWord* t = top();
  HeapWord* prev_p = NULL;
  while (p < t) {
    oop(p)->verify();
    prev_p = p;
    p += oop(p)->size();
  }
  guarantee(p == top(), "end of last object must match end of space");
  if (top() != end()) {
    guarantee(top() == block_start_const(end()-1) &&
              top() == block_start_const(top()),
              "top should be start of unallocated block, if it exists");
  }
}

void Space::oop_iterate(ExtendedOopClosure* blk) {
  ObjectToOopClosure blk2(blk);
  object_iterate(&blk2);
}

bool Space::obj_is_alive(const HeapWord* p) const {
  assert(block_is_obj(p), "The address should point to an object");
  return true;
}

#if INCLUDE_ALL_GCS
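// Parallel oop iteration over an arbitrary MemRegion of the space.
// Defined via a macro so that a version is instantiated for each
// closure type, presumably so the oop_iterate_size() call below can
// bind to the closure's specialized (non-virtual) implementation.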
#define ContigSpace_PAR_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)         \
                                                                            \
  void ContiguousSpace::par_oop_iterate(MemRegion mr, OopClosureType* blk) {\
    HeapWord* obj_addr = mr.start();                                        \
    HeapWord* t = mr.end();                                                 \
    while (obj_addr < t) {                                                  \
      assert(oop(obj_addr)->is_oop(), "Should be an oop");                  \
      obj_addr += oop(obj_addr)->oop_iterate_size(blk);                     \
    }                                                                       \
  }

  ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DEFN)

#undef ContigSpace_PAR_OOP_ITERATE_DEFN
#endif // INCLUDE_ALL_GCS

void ContiguousSpace::oop_iterate(ExtendedOopClosure* blk) {
  if (is_empty()) return;
  HeapWord* obj_addr = bottom();
  HeapWord* t = top();
  // Could call object_iterate(), but this is easier.
  while (obj_addr < t) {
    obj_addr += oop(obj_addr)->oop_iterate_size(blk);
  }
}

void ContiguousSpace::object_iterate(ObjectClosure* blk) {
  if (is_empty()) return;
  WaterMark bm = bottom_mark();
  object_iterate_from(bm, blk);
}

// For a ContiguousSpace object_iterate() and safe_object_iterate()
// are the same.
void ContiguousSpace::safe_object_iterate(ObjectClosure* blk) {
  object_iterate(blk);
}

void ContiguousSpace::object_iterate_from(WaterMark mark, ObjectClosure* blk) {
  assert(mark.space() == this, "Mark does not match space");
  HeapWord* p = mark.point();
  while (p < top()) {
    blk->do_object(oop(p));
    p += oop(p)->size();
  }
}

HeapWord*
ContiguousSpace::object_iterate_careful(ObjectClosureCareful* blk) {
  HeapWord* limit = concurrent_iteration_safe_limit();
  assert(limit <= top(), "sanity check");
  for (HeapWord* p = bottom(); p < limit;) {
    size_t size = blk->do_object_careful(oop(p));
    if (size == 0) {
      return p;  // failed at p
    } else {
      p += size;
    }
  }
  return NULL; // all done
}

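// Iterate from the saved mark to top(), re-reading top() after each
// pass: applying the closure may cause new objects to be allocated in
// this space (e.g. by promotion during a scavenge), and those must be
// scanned too before the saved mark is advanced past them.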
#define ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)  \
                                                                          \
void ContiguousSpace::                                                    \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {            \
  HeapWord* t;                                                            \
  HeapWord* p = saved_mark_word();                                        \
  assert(p != NULL, "expected saved mark");                               \
                                                                          \
  const intx interval = PrefetchScanIntervalInBytes;                      \
  do {                                                                    \
    t = top();                                                            \
    while (p < t) {                                                       \
      Prefetch::write(p, interval);                                       \
      debug_only(HeapWord* prev = p);                                     \
      oop m = oop(p);                                                     \
      p += m->oop_iterate_size(blk);                                      \
    }                                                                     \
  } while (t < top());                                                    \
                                                                          \
  set_saved_mark_word(p);                                                 \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN)

#undef ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN

// Very general, slow implementation.
HeapWord* ContiguousSpace::block_start_const(const void* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(p), p2i(bottom()), p2i(end()));
  if (p >= top()) {
    return top();
  } else {
    HeapWord* last = bottom();
    HeapWord* cur = last;
    while (cur <= p) {
      last = cur;
      cur += oop(cur)->size();
    }
    assert(oop(last)->is_oop(), PTR_FORMAT " should be an object start", p2i(last));
    return last;
  }
}

size_t ContiguousSpace::block_size(const HeapWord* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(p), p2i(bottom()), p2i(end()));
  HeapWord* current_top = top();
  assert(p <= current_top,
         "p > current top - p: " PTR_FORMAT ", current top: " PTR_FORMAT,
         p2i(p), p2i(current_top));
  assert(p == current_top || oop(p)->is_oop(),
         "p (" PTR_FORMAT ") is not a block start - "
         "current_top: " PTR_FORMAT ", is_oop: %s",
         p2i(p), p2i(current_top), BOOL_TO_STR(oop(p)->is_oop()));
  if (p < current_top) {
    return oop(p)->size();
  } else {
    assert(p == current_top, "just checking");
    return pointer_delta(end(), (HeapWord*) p);
  }
}

// This version requires locking.
inline HeapWord* ContiguousSpace::allocate_impl(size_t size) {
  assert(Heap_lock->owned_by_self() ||
         (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()),
         "not locked");
  HeapWord* obj = top();
  if (pointer_delta(end(), obj) >= size) {
    HeapWord* new_top = obj + size;
    set_top(new_top);
    assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
    return obj;
  } else {
    return NULL;
  }
}

// This version is lock-free.
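// It bumps top with a CAS and retries until the exchange either
// succeeds or the remaining space is too small, so concurrent
// allocators never block one another.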
inline HeapWord* ContiguousSpace::par_allocate_impl(size_t size) {
  do {
    HeapWord* obj = top();
    if (pointer_delta(end(), obj) >= size) {
      HeapWord* new_top = obj + size;
      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
      // result can be one of two values:
      //  the old top value: the exchange succeeded
      //  otherwise: the current value of top, as updated by another thread
      if (result == obj) {
        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
        return obj;
      }
    } else {
      return NULL;
    }
  } while (true);
}

HeapWord* ContiguousSpace::allocate_aligned(size_t size) {
  assert(Heap_lock->owned_by_self() ||
         (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()),
         "not locked");
  HeapWord* end_value = end();

  HeapWord* obj = CollectedHeap::align_allocation_or_fail(top(), end_value, SurvivorAlignmentInBytes);
  if (obj == NULL) {
    return NULL;
  }

  if (pointer_delta(end_value, obj) >= size) {
    HeapWord* new_top = obj + size;
    set_top(new_top);
    assert(is_ptr_aligned(obj, SurvivorAlignmentInBytes) && is_aligned(new_top),
           "checking alignment");
    return obj;
  } else {
    set_top(obj);
    return NULL;
  }
}

// Requires locking.
HeapWord* ContiguousSpace::allocate(size_t size) {
  return allocate_impl(size);
}

// Lock-free.
HeapWord* ContiguousSpace::par_allocate(size_t size) {
  return par_allocate_impl(size);
}

void ContiguousSpace::allocate_temporary_filler(int factor) {
  // Allocate a temporary type array, decreasing the free size by a factor of 'factor'.
  assert(factor >= 0, "just checking");
  size_t size = pointer_delta(end(), top());

  // If the space is full, return.
  if (size == 0) return;

  if (factor > 0) {
    size -= size/factor;
  }
  size = align_object_size(size);

  const size_t array_header_size = typeArrayOopDesc::header_size(T_INT);
  if (size >= (size_t)align_object_size(array_header_size)) {
    size_t length = (size - array_header_size) * (HeapWordSize / sizeof(jint));
    // allocate uninitialized int array
    typeArrayOop t = (typeArrayOop) allocate(size);
    assert(t != NULL, "allocation should succeed");
    t->set_mark(markOopDesc::prototype());
    t->set_klass(Universe::intArrayKlassObj());
    t->set_length((int)length);
  } else {
    assert(size == CollectedHeap::min_fill_size(),
           "size for smallest fake object doesn't match");
    instanceOop obj = (instanceOop) allocate(size);
    obj->set_mark(markOopDesc::prototype());
    obj->set_klass_gap(0);
    obj->set_klass(SystemDictionary::Object_klass());
  }
}

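// Block offset table maintenance: initialize_threshold() resets the
// table's update threshold for a freshly cleared space, while
// cross_threshold() records the newly allocated block [start, end)
// and returns the updated threshold.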
HeapWord* OffsetTableContigSpace::initialize_threshold() {
  return _offsets.initialize_threshold();
}

HeapWord* OffsetTableContigSpace::cross_threshold(HeapWord* start, HeapWord* end) {
  _offsets.alloc_block(start, end);
  return _offsets.threshold();
}

OffsetTableContigSpace::OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
                                               MemRegion mr) :
  _offsets(sharedOffsetArray, mr),
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true)
{
  _offsets.set_contig_space(this);
  initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
}

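// Sampling intervals used by verify() below: with OBJ_SAMPLE_INTERVAL
// at 0, every object is verified, and roughly every
// BLOCK_SAMPLE_INTERVAL-th object is additionally looked up through
// the block offset table as a consistency check.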
#define OBJ_SAMPLE_INTERVAL 0
#define BLOCK_SAMPLE_INTERVAL 100

void OffsetTableContigSpace::verify() const {
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  int objs = 0;
  int blocks = 0;

  if (VerifyObjectStartArray) {
    _offsets.verify();
  }

  while (p < top()) {
    size_t size = oop(p)->size();
    // For a sampling of objects in the space, find each one using the
    // block offset table.
    if (blocks == BLOCK_SAMPLE_INTERVAL) {
      guarantee(p == block_start_const(p + (size/2)),
                "check offset computation");
      blocks = 0;
    } else {
      blocks++;
    }

    if (objs == OBJ_SAMPLE_INTERVAL) {
      oop(p)->verify();
      objs = 0;
    } else {
      objs++;
    }
    prev_p = p;
    p += size;
  }
  guarantee(p == top(), "end of last object must match end of space");
}

size_t TenuredSpace::allowed_dead_ratio() const {
  return MarkSweepDeadRatio;
}