space.cpp revision 13243:7235bc30c0d7
/*
 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc/serial/defNewGeneration.hpp"
#include "gc/shared/blockOffsetTable.inline.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/space.inline.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "memory/universe.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/java.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"

HeapWord* DirtyCardToOopClosure::get_actual_top(HeapWord* top,
                                                HeapWord* top_obj) {
  if (top_obj != NULL) {
    if (_sp->block_is_obj(top_obj)) {
      if (_precision == CardTableModRefBS::ObjHeadPreciseArray) {
        if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
          // An arrayOop is starting on the dirty card - since we do exact
          // store checks for objArrays we are done.
        } else {
          // Otherwise, it is possible that the object starting on the dirty
          // card spans the entire card, and that the store happened on a
          // later card.  Figure out where the object ends.
          // Use the block_size() method of the space over which
          // the iteration is being done.  That space (e.g. CMS) may have
          // specific requirements on object sizes which will
          // be reflected in the block_size() method.
          top = top_obj + oop(top_obj)->size();
        }
      }
    } else {
      top = top_obj;
    }
  } else {
    assert(top == _sp->end(), "only case where top_obj == NULL");
  }
  return top;
}

void DirtyCardToOopClosure::walk_mem_region(MemRegion mr,
                                            HeapWord* bottom,
                                            HeapWord* top) {
  // 1. Blocks may or may not be objects.
  // 2. Even when block_is_obj() is true, the object may not entirely
  //    occupy the block if the block quantum is larger than
  //    the object size.
  // We can and should try to optimize by calling the non-MemRegion
  // version of oop_iterate() for all but the extremal objects
  // (for which we need to call the MemRegion version of
  // oop_iterate()). To be done post-beta. XXX
  for (; bottom < top; bottom += _sp->block_size(bottom)) {
    // As in the case of contiguous space above, we'd like to
    // just use the value returned by oop_iterate to increment the
    // current pointer; unfortunately, that won't work in CMS because
    // we'd need an interface change (it seems) to have the space
    // "adjust the object size" (for instance, pad it up to its
    // block alignment or minimum block size restrictions). XXX
    if (_sp->block_is_obj(bottom) &&
        !_sp->obj_allocated_since_save_marks(oop(bottom))) {
      oop(bottom)->oop_iterate(_cl, mr);
    }
  }
}

// We get called with "mr" representing the dirty region
// that we want to process. Because of imprecise marking,
// we may need to extend the incoming "mr" to the right,
// and scan more. However, because we may already have
// scanned some of that extended region, we may need to
// trim back its right end so that we do not rescan what
// we (or another worker thread) have already scanned
// or are planning to scan.
void DirtyCardToOopClosure::do_MemRegion(MemRegion mr) {

  // Some collectors need to do special things whenever their dirty
  // cards are processed. For instance, CMS must remember mutator updates
  // (i.e. dirty cards) so as to re-scan mutated objects.
  // Such work can be piggy-backed here on dirty card scanning, so as to make
  // it slightly more efficient than doing a complete non-destructive pre-scan
  // of the card table.
  MemRegionClosure* pCl = _sp->preconsumptionDirtyCardClosure();
  if (pCl != NULL) {
    pCl->do_MemRegion(mr);
  }

  HeapWord* bottom = mr.start();
  HeapWord* last = mr.last();
  HeapWord* top = mr.end();
  HeapWord* bottom_obj;
  HeapWord* top_obj;

  assert(_precision == CardTableModRefBS::ObjHeadPreciseArray ||
         _precision == CardTableModRefBS::Precise,
         "Only ones we deal with for now.");

  assert(_precision != CardTableModRefBS::ObjHeadPreciseArray ||
         _cl->idempotent() || _last_bottom == NULL ||
         top <= _last_bottom,
         "Not decreasing");
  NOT_PRODUCT(_last_bottom = mr.start());

  bottom_obj = _sp->block_start(bottom);
  top_obj    = _sp->block_start(last);

  assert(bottom_obj <= bottom, "just checking");
  assert(top_obj    <= top,    "just checking");

  // Given what we think is the top of the memory region and
  // the start of the object at the top, get the actual
  // value of the top.
  top = get_actual_top(top, top_obj);

  // If the previous call did some part of this region, don't redo.
  if (_precision == CardTableModRefBS::ObjHeadPreciseArray &&
      _min_done != NULL &&
      _min_done < top) {
    top = _min_done;
  }

  // Top may have been reset, and in fact may be below bottom,
  // e.g. the dirty card region is entirely in a now free object
  // -- something that could happen with a concurrent sweeper.
  bottom = MIN2(bottom, top);
  MemRegion extended_mr = MemRegion(bottom, top);
  assert(bottom <= top &&
         (_precision != CardTableModRefBS::ObjHeadPreciseArray ||
          _min_done == NULL ||
          top <= _min_done),
         "overlap!");

  // Walk the region if it is not empty; otherwise there is nothing to do.
  if (!extended_mr.is_empty()) {
    walk_mem_region(extended_mr, bottom_obj, top);
  }

  // An idempotent closure might be applied in any order, so we don't
  // record a _min_done for it.
  if (!_cl->idempotent()) {
    _min_done = bottom;
  } else {
    assert(_min_done == _last_explicit_min_done,
           "Don't update _min_done for idempotent cl");
  }
}
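
// A minimal standalone sketch (not HotSpot code) of the extend-then-trim
// logic above, using plain pointers; object_end() is a hypothetical lookup
// standing in for block_start()/get_actual_top().
#if 0
#include <algorithm>
#include <cassert>

typedef char* Addr;

Addr object_end(Addr p);  // hypothetical: end of the object spanning 'p'

// Process the dirty region [bottom, top): extend 'top' to the boundary of
// the object spanning the last card, but never past 'min_done' (the lowest
// address a previous call has already handled).
Addr process_dirty_region(Addr bottom, Addr top, Addr min_done) {
  Addr actual_top = object_end(top - 1);    // extend to the right
  if (min_done != nullptr && min_done < actual_top) {
    actual_top = min_done;                  // trim: don't redo scanned work
  }
  bottom = std::min(bottom, actual_top);    // region may now be empty
  assert(bottom <= actual_top && "overlap");
  // ... walk objects in [bottom, actual_top) here ...
  return bottom;                            // the caller's next min_done
}
#endif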

DirtyCardToOopClosure* Space::new_dcto_cl(ExtendedOopClosure* cl,
                                          CardTableModRefBS::PrecisionStyle precision,
                                          HeapWord* boundary,
                                          bool parallel) {
  return new DirtyCardToOopClosure(this, cl, precision, boundary);
}

HeapWord* ContiguousSpaceDCTOC::get_actual_top(HeapWord* top,
                                               HeapWord* top_obj) {
  if (top_obj != NULL && top_obj < (_sp->toContiguousSpace())->top()) {
    if (_precision == CardTableModRefBS::ObjHeadPreciseArray) {
      if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
        // An arrayOop is starting on the dirty card - since we do exact
        // store checks for objArrays we are done.
      } else {
        // Otherwise, it is possible that the object starting on the dirty
        // card spans the entire card, and that the store happened on a
        // later card.  Figure out where the object ends.
        assert(_sp->block_size(top_obj) == (size_t) oop(top_obj)->size(),
          "Block size and object size mismatch");
        top = top_obj + oop(top_obj)->size();
      }
    }
  } else {
    top = (_sp->toContiguousSpace())->top();
  }
  return top;
}

void FilteringDCTOC::walk_mem_region(MemRegion mr,
                                     HeapWord* bottom,
                                     HeapWord* top) {
  // Note that this assumption won't hold if we have a concurrent
  // collector in this space, which may have freed up objects after
  // they were dirtied and before the stop-the-world GC that is
  // examining cards here.
  assert(bottom < top, "ought to be at least one obj on a dirty card.");

  if (_boundary != NULL) {
    // We have a boundary outside of which we don't want to look
    // at objects, so create a filtering closure around the
    // oop closure before walking the region.
    FilteringClosure filter(_boundary, _cl);
    walk_mem_region_with_cl(mr, bottom, top, &filter);
  } else {
    // No boundary, simply walk the heap with the oop closure.
    walk_mem_region_with_cl(mr, bottom, top, _cl);
  }
}

// We must replicate this so that the static type of "FilteringClosure"
// (see above) is apparent at the oop_iterate calls.
#define ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \
void ContiguousSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr,        \
                                                   HeapWord* bottom,    \
                                                   HeapWord* top,       \
                                                   ClosureType* cl) {   \
  bottom += oop(bottom)->oop_iterate_size(cl, mr);                      \
  if (bottom < top) {                                                   \
    HeapWord* next_obj = bottom + oop(bottom)->size();                  \
    while (next_obj < top) {                                            \
      /* Bottom lies entirely below top, so we can call the */          \
      /* non-memRegion version of oop_iterate below. */                 \
      oop(bottom)->oop_iterate(cl);                                     \
      bottom = next_obj;                                                \
      next_obj = bottom + oop(bottom)->size();                          \
    }                                                                   \
    /* Last object. */                                                  \
    oop(bottom)->oop_iterate(cl, mr);                                   \
  }                                                                     \
}

// (There are only two of these, rather than N, because the split is due
// only to the introduction of the FilteringClosure, a local part of the
// impl of this abstraction.)
ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
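
// A standalone sketch (not HotSpot code) of the shape the macro above
// expands to: clip only the first and last object against the region and
// use the cheap whole-object walk for everything in between. Visitor and
// obj_size() are hypothetical stand-ins for the oop closure and oop size.
#if 0
#include <cstddef>

size_t obj_size(char* p);  // hypothetical: size of the object at p, in bytes

struct Visitor {
  virtual size_t visit(char* obj) = 0;                              // whole object
  virtual size_t visit_clipped(char* obj, char* lo, char* hi) = 0;  // clipped
};

void walk_region(char* bottom, char* top, char* lo, char* hi, Visitor* v) {
  bottom += v->visit_clipped(bottom, lo, hi);  // first object may straddle lo
  if (bottom < top) {
    char* next = bottom + obj_size(bottom);
    while (next < top) {
      v->visit(bottom);                        // interior: no clipping needed
      bottom = next;
      next = bottom + obj_size(bottom);
    }
    v->visit_clipped(bottom, lo, hi);          // last object may straddle hi
  }
}
#endif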

DirtyCardToOopClosure*
ContiguousSpace::new_dcto_cl(ExtendedOopClosure* cl,
                             CardTableModRefBS::PrecisionStyle precision,
                             HeapWord* boundary,
                             bool parallel) {
  return new ContiguousSpaceDCTOC(this, cl, precision, boundary);
}

void Space::initialize(MemRegion mr,
                       bool clear_space,
                       bool mangle_space) {
  HeapWord* bottom = mr.start();
  HeapWord* end    = mr.end();
  assert(Universe::on_page_boundary(bottom) && Universe::on_page_boundary(end),
         "invalid space boundaries");
  set_bottom(bottom);
  set_end(end);
  if (clear_space) clear(mangle_space);
}

void Space::clear(bool mangle_space) {
  if (ZapUnusedHeapArea && mangle_space) {
    mangle_unused_area();
  }
}

ContiguousSpace::ContiguousSpace(): CompactibleSpace(), _top(NULL),
    _concurrent_iteration_safe_limit(NULL) {
  _mangler = new GenSpaceMangler(this);
}

ContiguousSpace::~ContiguousSpace() {
  delete _mangler;
}

void ContiguousSpace::initialize(MemRegion mr,
                                 bool clear_space,
                                 bool mangle_space)
{
  CompactibleSpace::initialize(mr, clear_space, mangle_space);
  set_concurrent_iteration_safe_limit(top());
}

void ContiguousSpace::clear(bool mangle_space) {
  set_top(bottom());
  set_saved_mark();
  CompactibleSpace::clear(mangle_space);
}

bool ContiguousSpace::is_free_block(const HeapWord* p) const {
  return p >= _top;
}

void OffsetTableContigSpace::clear(bool mangle_space) {
  ContiguousSpace::clear(mangle_space);
  _offsets.initialize_threshold();
}

void OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
  Space::set_bottom(new_bottom);
  _offsets.set_bottom(new_bottom);
}

void OffsetTableContigSpace::set_end(HeapWord* new_end) {
  // Space should not advertise an increase in size
  // until after the underlying offset table has been enlarged.
  _offsets.resize(pointer_delta(new_end, bottom()));
  Space::set_end(new_end);
}

#ifndef PRODUCT

void ContiguousSpace::set_top_for_allocations(HeapWord* v) {
  mangler()->set_top_for_allocations(v);
}
void ContiguousSpace::set_top_for_allocations() {
  mangler()->set_top_for_allocations(top());
}
void ContiguousSpace::check_mangled_unused_area(HeapWord* limit) {
  mangler()->check_mangled_unused_area(limit);
}

void ContiguousSpace::check_mangled_unused_area_complete() {
  mangler()->check_mangled_unused_area_complete();
}

// Mangle only the unused space that has not previously
// been mangled and that has not been allocated since being
// mangled.
void ContiguousSpace::mangle_unused_area() {
  mangler()->mangle_unused_area();
}
void ContiguousSpace::mangle_unused_area_complete() {
  mangler()->mangle_unused_area_complete();
}
#endif  // NOT_PRODUCT

void CompactibleSpace::initialize(MemRegion mr,
                                  bool clear_space,
                                  bool mangle_space) {
  Space::initialize(mr, clear_space, mangle_space);
  set_compaction_top(bottom());
  _next_compaction_space = NULL;
}

void CompactibleSpace::clear(bool mangle_space) {
  Space::clear(mangle_space);
  _compaction_top = bottom();
}

HeapWord* CompactibleSpace::forward(oop q, size_t size,
                                    CompactPoint* cp, HeapWord* compact_top) {
  // q is alive
  // First check if we should switch compaction space
  assert(this == cp->space, "'this' should be current compaction space.");
  size_t compaction_max_size = pointer_delta(end(), compact_top);
  while (size > compaction_max_size) {
    // switch to next compaction space
    cp->space->set_compaction_top(compact_top);
    cp->space = cp->space->next_compaction_space();
    if (cp->space == NULL) {
      cp->gen = GenCollectedHeap::heap()->young_gen();
      assert(cp->gen != NULL, "compaction must succeed");
      cp->space = cp->gen->first_compaction_space();
      assert(cp->space != NULL, "generation must have a first compaction space");
    }
    compact_top = cp->space->bottom();
    cp->space->set_compaction_top(compact_top);
    cp->threshold = cp->space->initialize_threshold();
    compaction_max_size = pointer_delta(cp->space->end(), compact_top);
  }

  // store the forwarding pointer into the mark word
  if ((HeapWord*)q != compact_top) {
    q->forward_to(oop(compact_top));
    assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
  } else {
    // if the object isn't moving we can just set the mark to the default
    // mark and handle it specially later on.
    q->init_mark();
    assert(q->forwardee() == NULL, "should be forwarded to NULL");
  }

  compact_top += size;

  // we need to update the offset table so that the beginnings of objects can be
  // found during scavenge.  Note that we are updating the offset table based on
  // where the object will be once the compaction phase finishes.
  if (compact_top > cp->threshold)
    cp->threshold =
      cp->space->cross_threshold(compact_top - size, compact_top);
  return compact_top;
}
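
// A minimal sketch (not HotSpot code) of the forwarding step above: each
// live object is assigned its post-compaction address by bumping a cursor.
// Obj and its fields are hypothetical simplifications of the mark word.
#if 0
#include <cstddef>

struct Obj {
  Obj*   forwardee;    // stands in for the forwarding pointer in the mark
  size_t size_bytes;   // object size
};

// Assign 'q' its destination and advance the compaction cursor.
char* forward_obj(Obj* q, char* compact_top) {
  if (reinterpret_cast<char*>(q) != compact_top) {
    q->forwardee = reinterpret_cast<Obj*>(compact_top);  // object will move
  } else {
    q->forwardee = nullptr;   // object stays put; handled specially later
  }
  return compact_top + q->size_bytes;
}
#endif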

void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
  scan_and_forward(this, cp);
}

void CompactibleSpace::adjust_pointers() {
  // Check first if there is any work to do.
  if (used() == 0) {
    return;   // Nothing to do.
  }

  scan_and_adjust_pointers(this);
}

void CompactibleSpace::compact() {
  scan_and_compact(this);
}

void Space::print_short() const { print_short_on(tty); }

void Space::print_short_on(outputStream* st) const {
  st->print(" space " SIZE_FORMAT "K, %3d%% used", capacity() / K,
              (int) ((double) used() * 100 / capacity()));
}

void Space::print() const { print_on(tty); }

void Space::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ")",
                p2i(bottom()), p2i(end()));
}

void ContiguousSpace::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
                p2i(bottom()), p2i(top()), p2i(end()));
}

void OffsetTableContigSpace::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
                INTPTR_FORMAT ", " INTPTR_FORMAT ")",
              p2i(bottom()), p2i(top()), p2i(_offsets.threshold()), p2i(end()));
}

void ContiguousSpace::verify() const {
  HeapWord* p = bottom();
  HeapWord* t = top();
  HeapWord* prev_p = NULL;
  while (p < t) {
    oop(p)->verify();
    prev_p = p;
    p += oop(p)->size();
  }
  guarantee(p == top(), "end of last object must match end of space");
  if (top() != end()) {
    guarantee(top() == block_start_const(end()-1) &&
              top() == block_start_const(top()),
              "top should be start of unallocated block, if it exists");
  }
}

void Space::oop_iterate(ExtendedOopClosure* blk) {
  ObjectToOopClosure blk2(blk);
  object_iterate(&blk2);
}

bool Space::obj_is_alive(const HeapWord* p) const {
  assert(block_is_obj(p), "The address should point to an object");
  return true;
}

#if INCLUDE_ALL_GCS
#define ContigSpace_PAR_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)         \
                                                                            \
  void ContiguousSpace::par_oop_iterate(MemRegion mr, OopClosureType* blk) {\
    HeapWord* obj_addr = mr.start();                                        \
    HeapWord* t = mr.end();                                                 \
    while (obj_addr < t) {                                                  \
      assert(oop(obj_addr)->is_oop(), "Should be an oop");                  \
      obj_addr += oop(obj_addr)->oop_iterate_size(blk);                     \
    }                                                                       \
  }

  ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DEFN)

#undef ContigSpace_PAR_OOP_ITERATE_DEFN
#endif // INCLUDE_ALL_GCS

void ContiguousSpace::oop_iterate(ExtendedOopClosure* blk) {
  if (is_empty()) return;
  HeapWord* obj_addr = bottom();
  HeapWord* t = top();
  // Could call object_iterate(), but this is easier.
  while (obj_addr < t) {
    obj_addr += oop(obj_addr)->oop_iterate_size(blk);
  }
}

void ContiguousSpace::object_iterate(ObjectClosure* blk) {
  if (is_empty()) return;
  object_iterate_from(bottom(), blk);
}

// For a ContiguousSpace, object_iterate() and safe_object_iterate()
// are the same.
void ContiguousSpace::safe_object_iterate(ObjectClosure* blk) {
  object_iterate(blk);
}

void ContiguousSpace::object_iterate_from(HeapWord* mark, ObjectClosure* blk) {
  while (mark < top()) {
    blk->do_object(oop(mark));
    mark += oop(mark)->size();
  }
}

HeapWord*
ContiguousSpace::object_iterate_careful(ObjectClosureCareful* blk) {
  HeapWord* limit = concurrent_iteration_safe_limit();
  assert(limit <= top(), "sanity check");
  for (HeapWord* p = bottom(); p < limit;) {
    size_t size = blk->do_object_careful(oop(p));
    if (size == 0) {
      return p;  // failed at p
    } else {
      p += size;
    }
  }
  return NULL; // all done
}

#define ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)  \
                                                                          \
void ContiguousSpace::                                                    \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {            \
  HeapWord* t;                                                            \
  HeapWord* p = saved_mark_word();                                        \
  assert(p != NULL, "expected saved mark");                               \
                                                                          \
  const intx interval = PrefetchScanIntervalInBytes;                      \
  do {                                                                    \
    t = top();                                                            \
    while (p < t) {                                                       \
      Prefetch::write(p, interval);                                       \
      debug_only(HeapWord* prev = p);                                     \
      oop m = oop(p);                                                     \
      p += m->oop_iterate_size(blk);                                      \
    }                                                                     \
  } while (t < top());                                                    \
                                                                          \
  set_saved_mark_word(p);                                                 \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN)

#undef ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN
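
// A standalone sketch (not HotSpot code) of the loop the macro above
// generates: the closure may allocate, moving top(), so the scan repeats
// until top() is stable. get_top() and scan_object() are hypothetical.
#if 0
char*  get_top();              // current allocation high-water mark
size_t scan_object(char* p);   // scan one object (may allocate); returns size

char* scan_from_saved_mark(char* saved_mark) {
  char* p = saved_mark;
  char* t;
  do {
    t = get_top();             // snapshot top
    while (p < t) {
      p += scan_object(p);     // scanning may push top() further out
    }
  } while (t < get_top());     // new objects appeared above 't': keep going
  return p;                    // becomes the new saved mark
}
#endif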

// Very general, slow implementation.
HeapWord* ContiguousSpace::block_start_const(const void* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(p), p2i(bottom()), p2i(end()));
  if (p >= top()) {
    return top();
  } else {
    HeapWord* last = bottom();
    HeapWord* cur = last;
    while (cur <= p) {
      last = cur;
      cur += oop(cur)->size();
    }
    assert(oop(last)->is_oop(), PTR_FORMAT " should be an object start", p2i(last));
    return last;
  }
}
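
// A standalone sketch (not HotSpot code) of the linear search above: walk
// forward object by object until the next step would pass 'p'; obj_size()
// is a hypothetical size lookup.
#if 0
#include <cstddef>

size_t obj_size(const char* q);

const char* block_start(const char* bottom, const char* p) {
  const char* last = bottom;
  const char* cur  = bottom;
  while (cur <= p) {      // stop once cur has moved past p
    last = cur;
    cur += obj_size(cur);
  }
  return last;            // start of the object containing p
}
#endif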

size_t ContiguousSpace::block_size(const HeapWord* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(p), p2i(bottom()), p2i(end()));
  HeapWord* current_top = top();
  assert(p <= current_top,
         "p > current top - p: " PTR_FORMAT ", current top: " PTR_FORMAT,
         p2i(p), p2i(current_top));
  assert(p == current_top || oop(p)->is_oop(),
         "p (" PTR_FORMAT ") is not a block start - "
         "current_top: " PTR_FORMAT ", is_oop: %s",
         p2i(p), p2i(current_top), BOOL_TO_STR(oop(p)->is_oop()));
  if (p < current_top) {
    return oop(p)->size();
  } else {
    assert(p == current_top, "just checking");
    return pointer_delta(end(), (HeapWord*) p);
  }
}

// This version requires locking.
inline HeapWord* ContiguousSpace::allocate_impl(size_t size) {
  assert(Heap_lock->owned_by_self() ||
         (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()),
         "not locked");
  HeapWord* obj = top();
  if (pointer_delta(end(), obj) >= size) {
    HeapWord* new_top = obj + size;
    set_top(new_top);
    assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
    return obj;
  } else {
    return NULL;
  }
}

// This version is lock-free.
inline HeapWord* ContiguousSpace::par_allocate_impl(size_t size) {
  do {
    HeapWord* obj = top();
    if (pointer_delta(end(), obj) >= size) {
      HeapWord* new_top = obj + size;
      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
      // 'result' is one of two things:
      //  the old top value, in which case the exchange succeeded;
      //  otherwise, the current value of top, in which case another
      //  thread won the race and we retry.
      if (result == obj) {
        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
        return obj;
      }
    } else {
      return NULL;
    }
  } while (true);
}
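
// A minimal standalone model (not HotSpot code) of the lock-free path above,
// with std::atomic standing in for Atomic::cmpxchg_ptr; SimpleSpace is a
// hypothetical name.
#if 0
#include <atomic>
#include <cstddef>

struct SimpleSpace {
  std::atomic<char*> top;
  char*              end;

  // Returns the start of a 'bytes'-sized block, or nullptr if full.
  char* par_allocate(size_t bytes) {
    char* obj = top.load();
    while (static_cast<size_t>(end - obj) >= bytes) {
      char* new_top = obj + bytes;
      // On failure, compare_exchange_weak reloads 'obj' with the current
      // top, mirroring the retry loop in par_allocate_impl() above.
      if (top.compare_exchange_weak(obj, new_top)) {
        return obj;
      }
    }
    return nullptr;  // not enough room left
  }
};
#endif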

HeapWord* ContiguousSpace::allocate_aligned(size_t size) {
  assert(Heap_lock->owned_by_self() || (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()), "not locked");
  HeapWord* end_value = end();

  HeapWord* obj = CollectedHeap::align_allocation_or_fail(top(), end_value, SurvivorAlignmentInBytes);
  if (obj == NULL) {
    return NULL;
  }

  if (pointer_delta(end_value, obj) >= size) {
    HeapWord* new_top = obj + size;
    set_top(new_top);
    assert(::is_aligned(obj, SurvivorAlignmentInBytes) && is_aligned(new_top),
      "checking alignment");
    return obj;
  } else {
    set_top(obj);
    return NULL;
  }
}
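
// A sketch (not HotSpot code) of the align-or-fail step above: round the
// allocation point up to the requested alignment and fail if even the
// padding runs past 'end'. align_up_ptr is a hypothetical helper; the real
// code uses CollectedHeap::align_allocation_or_fail().
#if 0
#include <cstddef>
#include <cstdint>

char* align_up_ptr(char* p, size_t alignment) {  // alignment: power of two
  uintptr_t v = reinterpret_cast<uintptr_t>(p);
  uintptr_t aligned = (v + alignment - 1) & ~(uintptr_t)(alignment - 1);
  return reinterpret_cast<char*>(aligned);
}

char* align_or_fail(char* top, char* end, size_t alignment) {
  char* aligned = align_up_ptr(top, alignment);
  return (aligned <= end) ? aligned : nullptr;   // nullptr: no room to pad
}
#endif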

// Requires locking.
HeapWord* ContiguousSpace::allocate(size_t size) {
  return allocate_impl(size);
}

// Lock-free.
HeapWord* ContiguousSpace::par_allocate(size_t size) {
  return par_allocate_impl(size);
}

void ContiguousSpace::allocate_temporary_filler(int factor) {
  // Allocate a temporary filler array, shrinking the free space so that
  // roughly a fraction 1/factor of it remains free.
  assert(factor >= 0, "just checking");
  size_t size = pointer_delta(end(), top());

  // if space is full, return
  if (size == 0) return;

  if (factor > 0) {
    size -= size/factor;
  }
  size = align_object_size(size);

  const size_t array_header_size = typeArrayOopDesc::header_size(T_INT);
  if (size >= align_object_size(array_header_size)) {
    size_t length = (size - array_header_size) * (HeapWordSize / sizeof(jint));
    // allocate uninitialized int array
    typeArrayOop t = (typeArrayOop) allocate(size);
    assert(t != NULL, "allocation should succeed");
    t->set_mark(markOopDesc::prototype());
    t->set_klass(Universe::intArrayKlassObj());
    t->set_length((int)length);
  } else {
    assert(size == CollectedHeap::min_fill_size(),
           "size for smallest fake object doesn't match");
    instanceOop obj = (instanceOop) allocate(size);
    obj->set_mark(markOopDesc::prototype());
    obj->set_klass_gap(0);
    obj->set_klass(SystemDictionary::Object_klass());
  }
}
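
// A sketch (not HotSpot code) of the filler-length arithmetic above: given
// a filler size in heap words, compute how many jint elements the int array
// needs so that header plus payload covers it exactly. The word and header
// sizes below are assumptions (typical 64-bit values), not taken from the VM.
#if 0
#include <cstddef>

const size_t kHeapWordSize     = 8;  // assumption: 64-bit heap words
const size_t kJintSize         = 4;
const size_t kArrayHeaderWords = 2;  // assumption: mark + klass/length

size_t filler_array_length(size_t filler_words) {
  // payload words times ints-per-word gives the element count
  return (filler_words - kArrayHeaderWords) * (kHeapWordSize / kJintSize);
}
#endif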

HeapWord* OffsetTableContigSpace::initialize_threshold() {
  return _offsets.initialize_threshold();
}

HeapWord* OffsetTableContigSpace::cross_threshold(HeapWord* start, HeapWord* end) {
  _offsets.alloc_block(start, end);
  return _offsets.threshold();
}

OffsetTableContigSpace::OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
                                               MemRegion mr) :
  _offsets(sharedOffsetArray, mr),
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true)
{
  _offsets.set_contig_space(this);
  initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
}

#define OBJ_SAMPLE_INTERVAL 0
#define BLOCK_SAMPLE_INTERVAL 100

void OffsetTableContigSpace::verify() const {
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  int objs = 0;
  int blocks = 0;

  if (VerifyObjectStartArray) {
    _offsets.verify();
  }

  while (p < top()) {
    size_t size = oop(p)->size();
    // For a sampling of objects in the space, find each one using the
    // block offset table.
    if (blocks == BLOCK_SAMPLE_INTERVAL) {
      guarantee(p == block_start_const(p + (size/2)),
                "check offset computation");
      blocks = 0;
    } else {
      blocks++;
    }

    if (objs == OBJ_SAMPLE_INTERVAL) {
      oop(p)->verify();
      objs = 0;
    } else {
      objs++;
    }
    prev_p = p;
    p += size;
  }
  guarantee(p == top(), "end of last object must match end of space");
}

size_t TenuredSpace::allowed_dead_ratio() const {
  return MarkSweepDeadRatio;
}