/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc/serial/defNewGeneration.hpp"
#include "gc/shared/blockOffsetTable.inline.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/space.inline.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "memory/universe.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/java.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
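// Compute the address at which scanning of a dirty card region should
// actually stop. "top" is the notional end of the region and "top_obj" is
// the start of the block containing that end; with imprecise
// (ObjHeadPreciseArray) marking, a non-array object that starts on the
// card may have to be scanned all the way to its end, which can lie
// beyond the card itself.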
HeapWord* DirtyCardToOopClosure::get_actual_top(HeapWord* top,
                                                HeapWord* top_obj) {
  if (top_obj != NULL) {
    if (_sp->block_is_obj(top_obj)) {
      if (_precision == CardTableModRefBS::ObjHeadPreciseArray) {
        if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
          // An arrayOop is starting on the dirty card - since we do exact
          // store checks for objArrays we are done.
        } else {
          // Otherwise, it is possible that the object starting on the dirty
          // card spans the entire card, and that the store happened on a
          // later card.  Figure out where the object ends.
          // Use the block_size() method of the space over which
          // the iteration is being done.  That space (e.g. CMS) may have
          // specific requirements on object sizes which will
          // be reflected in the block_size() method.
          top = top_obj + oop(top_obj)->size();
        }
      }
    } else {
      top = top_obj;
    }
  } else {
    assert(top == _sp->end(), "only case where top_obj == NULL");
  }
  return top;
}

void DirtyCardToOopClosure::walk_mem_region(MemRegion mr,
                                            HeapWord* bottom,
                                            HeapWord* top) {
  // 1. Blocks may or may not be objects.
  // 2. Even when a block_is_obj(), it may not entirely
  //    occupy the block if the block quantum is larger than
  //    the object size.
  // We can and should try to optimize by calling the non-MemRegion
  // version of oop_iterate() for all but the extremal objects
  // (for which we need to call the MemRegion version of
  // oop_iterate()).  To be done post-beta XXX
  for (; bottom < top; bottom += _sp->block_size(bottom)) {
    // As in the case of contiguous space above, we'd like to
    // just use the value returned by oop_iterate to increment the
    // current pointer; unfortunately, that won't work in CMS because
    // we'd need an interface change (it seems) to have the space
    // "adjust the object size" (for instance pad it up to its
    // block alignment or minimum block size restrictions). XXX
    if (_sp->block_is_obj(bottom) &&
        !_sp->obj_allocated_since_save_marks(oop(bottom))) {
      oop(bottom)->oop_iterate(_cl, mr);
    }
  }
}

// We get called with "mr" representing the dirty region
// that we want to process. Because of imprecise marking,
// we may need to extend the incoming "mr" to the right,
// and scan more. However, because we may already have
// scanned some of that extended region, we may need to
// trim its right end back so that we do not scan what
// we (or another worker thread) may already have scanned
// or may be planning to scan.
void DirtyCardToOopClosure::do_MemRegion(MemRegion mr) {

  // Some collectors need to do special things whenever their dirty
  // cards are processed. For instance, CMS must remember mutator updates
  // (i.e. dirty cards) so as to re-scan mutated objects.
  // Such work can be piggy-backed here on dirty card scanning, so as to make
  // it slightly more efficient than doing a complete non-destructive pre-scan
  // of the card table.
  MemRegionClosure* pCl = _sp->preconsumptionDirtyCardClosure();
  if (pCl != NULL) {
    pCl->do_MemRegion(mr);
  }

  HeapWord* bottom = mr.start();
  HeapWord* last = mr.last();
  HeapWord* top = mr.end();
  HeapWord* bottom_obj;
  HeapWord* top_obj;

  assert(_precision == CardTableModRefBS::ObjHeadPreciseArray ||
         _precision == CardTableModRefBS::Precise,
         "Only ones we deal with for now.");

  assert(_precision != CardTableModRefBS::ObjHeadPreciseArray ||
         _cl->idempotent() || _last_bottom == NULL ||
         top <= _last_bottom,
         "Not decreasing");
  NOT_PRODUCT(_last_bottom = mr.start());

  bottom_obj = _sp->block_start(bottom);
  top_obj    = _sp->block_start(last);

  assert(bottom_obj <= bottom, "just checking");
  assert(top_obj    <= top,    "just checking");

  // Given what we think is the top of the memory region and
  // the start of the object at the top, get the actual
  // value of the top.
  top = get_actual_top(top, top_obj);

  // If the previous call did some part of this region, don't redo it.
  if (_precision == CardTableModRefBS::ObjHeadPreciseArray &&
      _min_done != NULL &&
      _min_done < top) {
    top = _min_done;
  }

  // Top may have been reset, and in fact may be below bottom,
  // e.g. the dirty card region is entirely in a now free object
  // -- something that could happen with a concurrent sweeper.
  bottom = MIN2(bottom, top);
  MemRegion extended_mr = MemRegion(bottom, top);
  assert(bottom <= top &&
         (_precision != CardTableModRefBS::ObjHeadPreciseArray ||
          _min_done == NULL ||
          top <= _min_done),
         "overlap!");

  // Walk the region if it is not empty; otherwise there is nothing to do.
  if (!extended_mr.is_empty()) {
    walk_mem_region(extended_mr, bottom_obj, top);
  }

  // An idempotent closure might be applied in any order, so we don't
  // record a _min_done for it.
  if (!_cl->idempotent()) {
    _min_done = bottom;
  } else {
    assert(_min_done == _last_explicit_min_done,
           "Don't update _min_done for idempotent cl");
  }
}
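// Generic factory for a dirty-card-to-oop closure; note that this base
// implementation makes no use of the "parallel" argument.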
DirtyCardToOopClosure* Space::new_dcto_cl(ExtendedOopClosure* cl,
                                          CardTableModRefBS::PrecisionStyle precision,
                                          HeapWord* boundary,
                                          bool parallel) {
  return new DirtyCardToOopClosure(this, cl, precision, boundary);
}

HeapWord* ContiguousSpaceDCTOC::get_actual_top(HeapWord* top,
                                               HeapWord* top_obj) {
  if (top_obj != NULL && top_obj < (_sp->toContiguousSpace())->top()) {
    if (_precision == CardTableModRefBS::ObjHeadPreciseArray) {
      if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
        // An arrayOop is starting on the dirty card - since we do exact
        // store checks for objArrays we are done.
      } else {
        // Otherwise, it is possible that the object starting on the dirty
        // card spans the entire card, and that the store happened on a
        // later card.  Figure out where the object ends.
        assert(_sp->block_size(top_obj) == (size_t) oop(top_obj)->size(),
               "Block size and object size mismatch");
        top = top_obj + oop(top_obj)->size();
      }
    }
  } else {
    top = (_sp->toContiguousSpace())->top();
  }
  return top;
}

void FilteringDCTOC::walk_mem_region(MemRegion mr,
                                     HeapWord* bottom,
                                     HeapWord* top) {
  // Note that this assumption won't hold if we have a concurrent
  // collector in this space, which may have freed up objects after
  // they were dirtied and before the stop-the-world GC that is
  // examining cards here.
  assert(bottom < top, "ought to be at least one obj on a dirty card.");

  if (_boundary != NULL) {
    // We have a boundary outside of which we don't want to look
    // at objects, so create a filtering closure around the
    // oop closure before walking the region.
    FilteringClosure filter(_boundary, _cl);
    walk_mem_region_with_cl(mr, bottom, top, &filter);
  } else {
    // No boundary, simply walk the heap with the oop closure.
    walk_mem_region_with_cl(mr, bottom, top, _cl);
  }
}

// We must replicate this so that the static type of "FilteringClosure"
// (see above) is apparent at the oop_iterate calls.
#define ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \
void ContiguousSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr,        \
                                                   HeapWord* bottom,    \
                                                   HeapWord* top,       \
                                                   ClosureType* cl) {   \
  bottom += oop(bottom)->oop_iterate_size(cl, mr);                      \
  if (bottom < top) {                                                   \
    HeapWord* next_obj = bottom + oop(bottom)->size();                  \
    while (next_obj < top) {                                            \
      /* Bottom lies entirely below top, so we can call the */          \
      /* non-memRegion version of oop_iterate below. */                 \
      oop(bottom)->oop_iterate(cl);                                     \
      bottom = next_obj;                                                \
      next_obj = bottom + oop(bottom)->size();                          \
    }                                                                   \
    /* Last object. */                                                  \
    oop(bottom)->oop_iterate(cl, mr);                                   \
  }                                                                     \
}

// (There are only two of these, rather than N, because the split is due
// only to the introduction of the FilteringClosure, a local part of the
// impl of this abstraction.)
ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)

DirtyCardToOopClosure*
ContiguousSpace::new_dcto_cl(ExtendedOopClosure* cl,
                             CardTableModRefBS::PrecisionStyle precision,
                             HeapWord* boundary,
                             bool parallel) {
  return new ContiguousSpaceDCTOC(this, cl, precision, boundary);
}

void Space::initialize(MemRegion mr,
                       bool clear_space,
                       bool mangle_space) {
  HeapWord* bottom = mr.start();
  HeapWord* end    = mr.end();
  assert(Universe::on_page_boundary(bottom) && Universe::on_page_boundary(end),
         "invalid space boundaries");
  set_bottom(bottom);
  set_end(end);
  if (clear_space) clear(mangle_space);
}

void Space::clear(bool mangle_space) {
  if (ZapUnusedHeapArea && mangle_space) {
    mangle_unused_area();
  }
}

ContiguousSpace::ContiguousSpace(): CompactibleSpace(), _top(NULL),
    _concurrent_iteration_safe_limit(NULL) {
  _mangler = new GenSpaceMangler(this);
}

ContiguousSpace::~ContiguousSpace() {
  delete _mangler;
}

void ContiguousSpace::initialize(MemRegion mr,
                                 bool clear_space,
                                 bool mangle_space)
{
  CompactibleSpace::initialize(mr, clear_space, mangle_space);
  set_concurrent_iteration_safe_limit(top());
}

void ContiguousSpace::clear(bool mangle_space) {
  set_top(bottom());
  set_saved_mark();
  CompactibleSpace::clear(mangle_space);
}
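// In a contiguous space, all memory at or above _top is unallocated.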
bool ContiguousSpace::is_free_block(const HeapWord* p) const {
  return p >= _top;
}

void OffsetTableContigSpace::clear(bool mangle_space) {
  ContiguousSpace::clear(mangle_space);
  _offsets.initialize_threshold();
}

void OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
  Space::set_bottom(new_bottom);
  _offsets.set_bottom(new_bottom);
}

void OffsetTableContigSpace::set_end(HeapWord* new_end) {
  // Space should not advertise an increase in size
  // until after the underlying offset table has been enlarged.
  _offsets.resize(pointer_delta(new_end, bottom()));
  Space::set_end(new_end);
}

#ifndef PRODUCT

void ContiguousSpace::set_top_for_allocations(HeapWord* v) {
  mangler()->set_top_for_allocations(v);
}
void ContiguousSpace::set_top_for_allocations() {
  mangler()->set_top_for_allocations(top());
}
void ContiguousSpace::check_mangled_unused_area(HeapWord* limit) {
  mangler()->check_mangled_unused_area(limit);
}

void ContiguousSpace::check_mangled_unused_area_complete() {
  mangler()->check_mangled_unused_area_complete();
}

// Mangle only the unused space that has not previously
// been mangled and that has not been allocated since being
// mangled.
void ContiguousSpace::mangle_unused_area() {
  mangler()->mangle_unused_area();
}
void ContiguousSpace::mangle_unused_area_complete() {
  mangler()->mangle_unused_area_complete();
}
#endif  // NOT_PRODUCT

void CompactibleSpace::initialize(MemRegion mr,
                                  bool clear_space,
                                  bool mangle_space) {
  Space::initialize(mr, clear_space, mangle_space);
  set_compaction_top(bottom());
  _next_compaction_space = NULL;
}

void CompactibleSpace::clear(bool mangle_space) {
  Space::clear(mangle_space);
  _compaction_top = bottom();
}
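// Compute the new (post-compaction) location of the live object q of the
// given size, installing a forwarding pointer in q's mark word unless q
// will not move. compact_top is advanced past the object, switching to
// the next compaction space whenever the current one fills up.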
HeapWord* CompactibleSpace::forward(oop q, size_t size,
                                    CompactPoint* cp, HeapWord* compact_top) {
  // q is alive
  // First check if we should switch compaction space
  assert(this == cp->space, "'this' should be current compaction space.");
  size_t compaction_max_size = pointer_delta(end(), compact_top);
  while (size > compaction_max_size) {
    // switch to next compaction space
    cp->space->set_compaction_top(compact_top);
    cp->space = cp->space->next_compaction_space();
    if (cp->space == NULL) {
      cp->gen = GenCollectedHeap::heap()->young_gen();
      assert(cp->gen != NULL, "compaction must succeed");
      cp->space = cp->gen->first_compaction_space();
      assert(cp->space != NULL, "generation must have a first compaction space");
    }
    compact_top = cp->space->bottom();
    cp->space->set_compaction_top(compact_top);
    cp->threshold = cp->space->initialize_threshold();
    compaction_max_size = pointer_delta(cp->space->end(), compact_top);
  }

  // store the forwarding pointer into the mark word
  if ((HeapWord*)q != compact_top) {
    q->forward_to(oop(compact_top));
    assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
  } else {
    // if the object isn't moving we can just set the mark to the default
    // mark and handle it specially later on.
    q->init_mark();
    assert(q->forwardee() == NULL, "should be forwarded to NULL");
  }

  compact_top += size;

  // We need to update the offset table so that the beginnings of objects can be
  // found during scavenge.  Note that we are updating the offset table based on
  // where the object will be once the compaction phase finishes.
  if (compact_top > cp->threshold) {
    cp->threshold =
      cp->space->cross_threshold(compact_top - size, compact_top);
  }
  return compact_top;
}

void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
  scan_and_forward(this, cp);
}

void CompactibleSpace::adjust_pointers() {
  // Check first if there is any work to do.
  if (used() == 0) {
    return;   // Nothing to do.
  }

  scan_and_adjust_pointers(this);
}

void CompactibleSpace::compact() {
  scan_and_compact(this);
}

void Space::print_short() const { print_short_on(tty); }

void Space::print_short_on(outputStream* st) const {
  st->print(" space " SIZE_FORMAT "K, %3d%% used", capacity() / K,
            (int) ((double) used() * 100 / capacity()));
}

void Space::print() const { print_on(tty); }

void Space::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               p2i(bottom()), p2i(end()));
}

void ContiguousSpace::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               p2i(bottom()), p2i(top()), p2i(end()));
}

void OffsetTableContigSpace::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
               INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               p2i(bottom()), p2i(top()), p2i(_offsets.threshold()), p2i(end()));
}

void ContiguousSpace::verify() const {
  HeapWord* p = bottom();
  HeapWord* t = top();
  HeapWord* prev_p = NULL;
  while (p < t) {
    oop(p)->verify();
    prev_p = p;
    p += oop(p)->size();
  }
  guarantee(p == top(), "end of last object must match end of space");
  if (top() != end()) {
    guarantee(top() == block_start_const(end()-1) &&
              top() == block_start_const(top()),
              "top should be start of unallocated block, if it exists");
  }
}

void Space::oop_iterate(ExtendedOopClosure* blk) {
  ObjectToOopClosure blk2(blk);
  object_iterate(&blk2);
}

bool Space::obj_is_alive(const HeapWord* p) const {
  assert(block_is_obj(p), "The address should point to an object");
  return true;
}

#if INCLUDE_ALL_GCS
#define ContigSpace_PAR_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)         \
                                                                            \
  void ContiguousSpace::par_oop_iterate(MemRegion mr, OopClosureType* blk) {\
    HeapWord* obj_addr = mr.start();                                        \
    HeapWord* t = mr.end();                                                 \
    while (obj_addr < t) {                                                  \
      assert(oopDesc::is_oop(oop(obj_addr)), "Should be an oop");           \
      obj_addr += oop(obj_addr)->oop_iterate_size(blk);                     \
    }                                                                       \
  }

  ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DEFN)

#undef ContigSpace_PAR_OOP_ITERATE_DEFN
#endif // INCLUDE_ALL_GCS

void ContiguousSpace::oop_iterate(ExtendedOopClosure* blk) {
  if (is_empty()) return;
  HeapWord* obj_addr = bottom();
  HeapWord* t = top();
  // Could call object_iterate(), but this is easier.
  while (obj_addr < t) {
    obj_addr += oop(obj_addr)->oop_iterate_size(blk);
  }
}

void ContiguousSpace::object_iterate(ObjectClosure* blk) {
  if (is_empty()) return;
  object_iterate_from(bottom(), blk);
}

// For a ContiguousSpace, object_iterate() and safe_object_iterate()
// are the same.
void ContiguousSpace::safe_object_iterate(ObjectClosure* blk) {
  object_iterate(blk);
}

void ContiguousSpace::object_iterate_from(HeapWord* mark, ObjectClosure* blk) {
  while (mark < top()) {
    blk->do_object(oop(mark));
    mark += oop(mark)->size();
  }
}
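// Apply the "careful" closure to objects, starting at bottom(), up to the
// concurrent-iteration safe limit. If the closure signals failure by
// returning a size of 0, return the address of the offending object;
// return NULL if the entire range was scanned successfully.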
HeapWord*
ContiguousSpace::object_iterate_careful(ObjectClosureCareful* blk) {
  HeapWord* limit = concurrent_iteration_safe_limit();
  assert(limit <= top(), "sanity check");
  for (HeapWord* p = bottom(); p < limit;) {
    size_t size = blk->do_object_careful(oop(p));
    if (size == 0) {
      return p;  // failed at p
    } else {
      p += size;
    }
  }
  return NULL; // all done
}
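// Iterate over oops in objects allocated since the last call to
// save_marks(). The outer loop below re-reads top() after each pass
// because applying the closure may itself allocate new objects in this
// space (e.g. by promotion); those objects must be scanned as well.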
#define ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)  \
                                                                          \
void ContiguousSpace::                                                    \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {            \
  HeapWord* t;                                                            \
  HeapWord* p = saved_mark_word();                                        \
  assert(p != NULL, "expected saved mark");                               \
                                                                          \
  const intx interval = PrefetchScanIntervalInBytes;                      \
  do {                                                                    \
    t = top();                                                            \
    while (p < t) {                                                       \
      Prefetch::write(p, interval);                                       \
      debug_only(HeapWord* prev = p);                                     \
      oop m = oop(p);                                                     \
      p += m->oop_iterate_size(blk);                                      \
    }                                                                     \
  } while (t < top());                                                    \
                                                                          \
  set_saved_mark_word(p);                                                 \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN)

#undef ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN

// Very general, slow implementation.
HeapWord* ContiguousSpace::block_start_const(const void* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(p), p2i(bottom()), p2i(end()));
  if (p >= top()) {
    return top();
  } else {
    HeapWord* last = bottom();
    HeapWord* cur = last;
    while (cur <= p) {
      last = cur;
      cur += oop(cur)->size();
    }
    assert(oopDesc::is_oop(oop(last)), PTR_FORMAT " should be an object start", p2i(last));
    return last;
  }
}

size_t ContiguousSpace::block_size(const HeapWord* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(p), p2i(bottom()), p2i(end()));
  HeapWord* current_top = top();
  assert(p <= current_top,
         "p > current top - p: " PTR_FORMAT ", current top: " PTR_FORMAT,
         p2i(p), p2i(current_top));
  assert(p == current_top || oopDesc::is_oop(oop(p)),
         "p (" PTR_FORMAT ") is not a block start - "
         "current_top: " PTR_FORMAT ", is_oop: %s",
         p2i(p), p2i(current_top), BOOL_TO_STR(oopDesc::is_oop(oop(p))));
  if (p < current_top) {
    return oop(p)->size();
  } else {
    assert(p == current_top, "just checking");
    return pointer_delta(end(), (HeapWord*) p);
  }
}

// This version requires locking.
inline HeapWord* ContiguousSpace::allocate_impl(size_t size) {
  assert(Heap_lock->owned_by_self() ||
         (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()),
         "not locked");
  HeapWord* obj = top();
  if (pointer_delta(end(), obj) >= size) {
    HeapWord* new_top = obj + size;
    set_top(new_top);
    assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
    return obj;
  } else {
    return NULL;
  }
}

// This version is lock-free.
inline HeapWord* ContiguousSpace::par_allocate_impl(size_t size) {
  do {
    HeapWord* obj = top();
    if (pointer_delta(end(), obj) >= size) {
      HeapWord* new_top = obj + size;
      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
      // "result" is one of two values:
      //   the old top value: the exchange succeeded
      //   otherwise: the current value of top is returned, i.e. the
      //              exchange failed and we retry
      if (result == obj) {
        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
        return obj;
      }
    } else {
      return NULL;
    }
  } while (true);
}
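// Allocation that aligns the object start to SurvivorAlignmentInBytes.
// Like allocate_impl() above, this requires the Heap_lock or a safepoint.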
HeapWord* ContiguousSpace::allocate_aligned(size_t size) {
  assert(Heap_lock->owned_by_self() ||
         (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()),
         "not locked");
  HeapWord* end_value = end();

  HeapWord* obj = CollectedHeap::align_allocation_or_fail(top(), end_value, SurvivorAlignmentInBytes);
  if (obj == NULL) {
    return NULL;
  }

  if (pointer_delta(end_value, obj) >= size) {
    HeapWord* new_top = obj + size;
    set_top(new_top);
    assert(::is_aligned(obj, SurvivorAlignmentInBytes) && is_aligned(new_top),
           "checking alignment");
    return obj;
  } else {
    set_top(obj);
    return NULL;
  }
}

// Requires locking.
HeapWord* ContiguousSpace::allocate(size_t size) {
  return allocate_impl(size);
}

// Lock-free.
HeapWord* ContiguousSpace::par_allocate(size_t size) {
  return par_allocate_impl(size);
}

void ContiguousSpace::allocate_temporary_filler(int factor) {
  // Allocate a temporary type array that consumes all but 1/factor of the
  // currently free space (or all of it if factor == 0).
  assert(factor >= 0, "just checking");
  size_t size = pointer_delta(end(), top());

  // if space is full, return
  if (size == 0) return;

  if (factor > 0) {
    size -= size/factor;
  }
  size = align_object_size(size);

  const size_t array_header_size = typeArrayOopDesc::header_size(T_INT);
  if (size >= align_object_size(array_header_size)) {
    size_t length = (size - array_header_size) * (HeapWordSize / sizeof(jint));
    // allocate uninitialized int array
    typeArrayOop t = (typeArrayOop) allocate(size);
    assert(t != NULL, "allocation should succeed");
    t->set_mark(markOopDesc::prototype());
    t->set_klass(Universe::intArrayKlassObj());
    t->set_length((int)length);
  } else {
    assert(size == CollectedHeap::min_fill_size(),
           "size for smallest fake object doesn't match");
    instanceOop obj = (instanceOop) allocate(size);
    obj->set_mark(markOopDesc::prototype());
    obj->set_klass_gap(0);
    obj->set_klass(SystemDictionary::Object_klass());
  }
}

HeapWord* OffsetTableContigSpace::initialize_threshold() {
  return _offsets.initialize_threshold();
}
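// Record the newly allocated block [start, end) in the offset table and
// return the updated threshold, i.e. the next address at which the table
// will have to be updated.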
HeapWord* OffsetTableContigSpace::cross_threshold(HeapWord* start, HeapWord* end) {
  _offsets.alloc_block(start, end);
  return _offsets.threshold();
}

OffsetTableContigSpace::OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
                                               MemRegion mr) :
  _offsets(sharedOffsetArray, mr),
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true)
{
  _offsets.set_contig_space(this);
  initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
}
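// Sampling intervals for verify() below: every object is verified
// (interval 0), and every 100th block start is cross-checked against the
// block offset table.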
#define OBJ_SAMPLE_INTERVAL 0
#define BLOCK_SAMPLE_INTERVAL 100

void OffsetTableContigSpace::verify() const {
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  int objs = 0;
  int blocks = 0;

  if (VerifyObjectStartArray) {
    _offsets.verify();
  }

  while (p < top()) {
    size_t size = oop(p)->size();
    // For a sampling of objects in the space, check that an interior
    // pointer maps back to the object's start via the block offset table.
    if (blocks == BLOCK_SAMPLE_INTERVAL) {
      guarantee(p == block_start_const(p + (size/2)),
                "check offset computation");
      blocks = 0;
    } else {
      blocks++;
    }

    if (objs == OBJ_SAMPLE_INTERVAL) {
      oop(p)->verify();
      objs = 0;
    } else {
      objs++;
    }
    prev_p = p;
    p += size;
  }
  guarantee(p == top(), "end of last object must match end of space");
}
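// The percentage of this space that may be kept as unreclaimed "dead
// wood" rather than compacted away; tenured spaces use the
// MarkSweepDeadRatio flag.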
size_t TenuredSpace::allowed_dead_ratio() const {
  return MarkSweepDeadRatio;
}