/*
 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_CMS_COMPACTIBLEFREELISTSPACE_HPP
#define SHARE_VM_GC_CMS_COMPACTIBLEFREELISTSPACE_HPP

#include "gc/cms/adaptiveFreeList.hpp"
#include "gc/cms/promotionInfo.hpp"
#include "gc/shared/blockOffsetTable.hpp"
#include "gc/shared/space.hpp"
#include "logging/log.hpp"
#include "memory/binaryTreeDictionary.hpp"
#include "memory/freeList.hpp"

// Classes in support of keeping track of promotions into a non-Contiguous
// space, in this case a CompactibleFreeListSpace.

// Forward declarations
class CMSCollector;
class CompactibleFreeListSpace;
class ConcurrentMarkSweepGeneration;
class BlkClosure;
class BlkClosureCareful;
class FreeChunk;
class UpwardsObjectClosure;
class ObjectClosureCareful;
class Klass;

class LinearAllocBlock VALUE_OBJ_CLASS_SPEC {
 public:
  LinearAllocBlock() : _ptr(0), _word_size(0), _refillSize(0),
    _allocation_size_limit(0) {}
  void set(HeapWord* ptr, size_t word_size, size_t refill_size,
    size_t allocation_size_limit) {
    _ptr = ptr;
    _word_size = word_size;
    _refillSize = refill_size;
    _allocation_size_limit = allocation_size_limit;
  }
  HeapWord* _ptr;
  size_t    _word_size;
  size_t    _refillSize;
  size_t    _allocation_size_limit;  // Largest size that will be allocated

  void print_on(outputStream* st) const;
};

// Concrete subclass of CompactibleSpace that implements
// a free list space, such as used in the concurrent mark sweep
// generation.

class CompactibleFreeListSpace: public CompactibleSpace {
  friend class VMStructs;
  friend class ConcurrentMarkSweepGeneration;
  friend class CMSCollector;
  // Local alloc buffer for promotion into this space.
  friend class CompactibleFreeListSpaceLAB;
  // Allow scan_and_* functions to call (private) overrides of the auxiliary functions on this class
  template <typename SpaceType>
  friend void CompactibleSpace::scan_and_adjust_pointers(SpaceType* space);
  template <typename SpaceType>
  friend void CompactibleSpace::scan_and_compact(SpaceType* space);
  template <typename SpaceType>
  friend void CompactibleSpace::verify_up_to_first_dead(SpaceType* space);
  template <typename SpaceType>
  friend void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp);

  // "Size" of chunks of work (executed during parallel remark phases
  // of CMS collection); this probably belongs in CMSCollector, although
  // it's cached here because it's used in
  // initialize_sequential_subtasks_for_rescan() which modifies
  // par_seq_tasks which also lives in Space. XXX
  const size_t _rescan_task_size;
  const size_t _marking_task_size;

  // Yet another sequential tasks done structure. This supports
  // CMS GC, where we have threads dynamically
  // claiming sub-tasks from a larger parallel task.
  SequentialSubTasksDone _conc_par_seq_tasks;

  BlockOffsetArrayNonContigSpace _bt;

  CMSCollector* _collector;
  ConcurrentMarkSweepGeneration* _old_gen;

  // Data structures for free blocks (used during allocation/sweeping)

  // Allocation is done linearly from two different blocks depending on
  // whether the request is small or large, in an effort to reduce
  // fragmentation. We assume that any locking for allocation is done
  // by the containing generation. Thus, none of the methods in this
  // space are re-entrant.
  enum SomeConstants {
    SmallForLinearAlloc = 16,        // if size < this, use _sLAB
    SmallForDictionary  = 257,       // if size < this, use _indexedFreeList
    IndexSetSize        = SmallForDictionary  // keep this odd-sized
  };
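  // Rough size-class routing implied by the constants above (the exact
  // fallback order is decided by the allocation helpers declared below):
  // a request of fewer than 16 words can be served from the small linear
  // allocation block, a request below 257 words from the indexed free
  // lists, and larger requests from the dictionary of large blocks.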
  static size_t IndexSetStart;
  static size_t IndexSetStride;

 private:
  enum FitStrategyOptions {
    FreeBlockStrategyNone = 0,
    FreeBlockBestFitFirst
  };

  PromotionInfo _promoInfo;

  // Helps to impose a global total order on freelistLock ranks;
  // assumes that CFLSpace's are allocated in global total order
  static int   _lockRank;

  // A lock protecting the free lists and free blocks;
  // mutable because of ubiquity of locking even for otherwise const methods
  mutable Mutex _freelistLock;
  // Locking verifier convenience function
  void assert_locked() const PRODUCT_RETURN;
  void assert_locked(const Mutex* lock) const PRODUCT_RETURN;

  // Linear allocation blocks
  LinearAllocBlock _smallLinearAllocBlock;

  AFLBinaryTreeDictionary* _dictionary;    // Pointer to dictionary for large size blocks

  // Indexed array for small size blocks
  AdaptiveFreeList<FreeChunk> _indexedFreeList[IndexSetSize];

  // Allocation strategy
  bool _fitStrategy;  // Use best fit strategy

  // This is an address close to the largest free chunk in the heap.
  // It is currently assumed to be at the end of the heap.  Free
  // chunks with addresses greater than nearLargestChunk are coalesced
  // in an effort to maintain a large chunk at the end of the heap.
  HeapWord*  _nearLargestChunk;

  // Used to keep track of limit of sweep for the space
  HeapWord* _sweep_limit;

  // Used to make the young collector update the mod union table
  MemRegionClosure* _preconsumptionDirtyCardClosure;

  // Support for compacting cms
  HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
  HeapWord* forward(oop q, size_t size, CompactPoint* cp, HeapWord* compact_top);

  // Initialization helpers.
  void initializeIndexedFreeListArray();

  // Extra stuff to manage promotion parallelism.

  // A lock protecting the dictionary during par promotion allocation.
  mutable Mutex _parDictionaryAllocLock;
  Mutex* parDictionaryAllocLock() const { return &_parDictionaryAllocLock; }

  // Locks protecting the exact lists during par promotion allocation.
  Mutex* _indexedFreeListParLocks[IndexSetSize];

  // Attempt to obtain up to "n" blocks of the size "word_sz" (which is
  // required to be smaller than "IndexSetSize").  If successful,
  // adds them to "fl", which is required to be an empty free list.
  // If the count of "fl" is negative, its absolute value indicates a
  // number of free chunks that had been previously "borrowed" from the
  // global list of size "word_sz", and must now be decremented.
  void par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);

  // Used by par_get_chunk_of_blocks() for the chunks from the
  // indexed_free_lists.
  bool par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);

  // Used by par_get_chunk_of_blocks_dictionary() to get a chunk
  // evenly splittable into "n" "word_sz" chunks.  Returns that
  // evenly splittable chunk.  May split a larger chunk to get the
  // evenly splittable chunk.
  FreeChunk* get_n_way_chunk_to_split(size_t word_sz, size_t n);

  // Used by par_get_chunk_of_blocks() for the chunks from the
  // dictionary.
  void par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);

  // Allocation helper functions
  // Allocate using a strategy that takes from the indexed free lists
  // first.  This allocation strategy assumes a companion sweeping
  // strategy that attempts to keep the needed number of chunks in each
  // indexed free list.
  HeapWord* allocate_adaptive_freelists(size_t size);

  // Gets a chunk from the linear allocation block (LinAB).  If there
  // is not enough space in the LinAB, refills it.
  HeapWord*  getChunkFromLinearAllocBlock(LinearAllocBlock* blk, size_t size);
  HeapWord*  getChunkFromSmallLinearAllocBlock(size_t size);
  // Get a chunk from the space remaining in the linear allocation block.
  // Do not attempt to refill if the space is not available; return NULL in
  // that case.  Do the repairs on the linear allocation block as appropriate.
  HeapWord*  getChunkFromLinearAllocBlockRemainder(LinearAllocBlock* blk, size_t size);
  inline HeapWord*  getChunkFromSmallLinearAllocBlockRemainder(size_t size);

  // Helper function for getChunkFromIndexedFreeList.
  // Replenish the indexed free list for this "size".  Do not take from an
  // underpopulated size.
  FreeChunk*  getChunkFromIndexedFreeListHelper(size_t size, bool replenish = true);

  // Get a chunk from the indexed free list.  If the indexed free list
  // does not have a free chunk, try to replenish the indexed free list
  // then get the free chunk from the replenished indexed free list.
  inline FreeChunk* getChunkFromIndexedFreeList(size_t size);

  // The returned chunk may be larger than requested (or null).
  FreeChunk* getChunkFromDictionary(size_t size);
  // The returned chunk is the exact size requested (or null).
  FreeChunk* getChunkFromDictionaryExact(size_t size);

  // Find a chunk in the indexed free list that is the best
  // fit for size "numWords".
  FreeChunk* bestFitSmall(size_t numWords);
  // For free list "fl" of chunks of size > numWords,
  // remove a chunk, split off a chunk of size numWords
  // and return it.  The split off remainder is returned to
  // the free lists.  The old name for getFromListGreater
  // was lookInListGreater.
  FreeChunk* getFromListGreater(AdaptiveFreeList<FreeChunk>* fl, size_t numWords);
  // Get a chunk in the indexed free list or dictionary,
  // by considering a larger chunk and splitting it.
  FreeChunk* getChunkFromGreater(size_t numWords);
  //  Verify that the given chunk is in the indexed free lists.
  bool verifyChunkInIndexedFreeLists(FreeChunk* fc) const;
  // Remove the specified chunk from the indexed free lists.
  void       removeChunkFromIndexedFreeList(FreeChunk* fc);
  // Remove the specified chunk from the dictionary.
  void       removeChunkFromDictionary(FreeChunk* fc);
  // Split a free chunk into a smaller free chunk of size "new_size".
  // Return the smaller free chunk and return the remainder to the
  // free lists.
  FreeChunk* splitChunkAndReturnRemainder(FreeChunk* chunk, size_t new_size);
  // Add a chunk to the free lists.
  void       addChunkToFreeLists(HeapWord* chunk, size_t size);
  // Add a chunk to the free lists, preferring to suffix it
  // to the last free chunk at end of space if possible, and
  // updating the block census stats as well as block offset table.
  // Take any locks as appropriate if we are multithreaded.
  void       addChunkToFreeListsAtEndRecordingStats(HeapWord* chunk, size_t size);
  // Add a free chunk to the indexed free lists.
  void       returnChunkToFreeList(FreeChunk* chunk);
  // Add a free chunk to the dictionary.
  void       returnChunkToDictionary(FreeChunk* chunk);

  // Functions for maintaining the linear allocation buffers (LinAB).
  // Repairing a linear allocation block refers to operations
  // performed on the remainder of a LinAB after an allocation
  // has been made from it.
  void       repairLinearAllocationBlocks();
  void       repairLinearAllocBlock(LinearAllocBlock* blk);
  void       refillLinearAllocBlock(LinearAllocBlock* blk);
  void       refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk);
  void       refillLinearAllocBlocksIfNeeded();

  void       verify_objects_initialized() const;

  // Statistics reporting helper functions
  void       reportFreeListStatistics(const char* title) const;
  void       reportIndexedFreeListStatistics(outputStream* st) const;
  size_t     maxChunkSizeInIndexedFreeLists() const;
  size_t     numFreeBlocksInIndexedFreeLists() const;
  // Accessor
  HeapWord* unallocated_block() const {
    if (BlockOffsetArrayUseUnallocatedBlock) {
      HeapWord* ub = _bt.unallocated_block();
      assert(ub >= bottom() &&
             ub <= end(), "space invariant");
      return ub;
    } else {
      return end();
    }
  }
  void freed(HeapWord* start, size_t size) {
    _bt.freed(start, size);
  }

  // Auxiliary functions for scan_and_{forward,adjust_pointers,compact} support.
  // See comments for CompactibleSpace for more information.
  inline HeapWord* scan_limit() const {
    return end();
  }

  inline bool scanned_block_is_obj(const HeapWord* addr) const {
    return CompactibleFreeListSpace::block_is_obj(addr); // Avoid virtual call
  }

  inline size_t scanned_block_size(const HeapWord* addr) const {
    return CompactibleFreeListSpace::block_size(addr); // Avoid virtual call
  }

  inline size_t adjust_obj_size(size_t size) const {
    return adjustObjectSize(size);
  }

  inline size_t obj_size(const HeapWord* addr) const;

 protected:
  // Reset the indexed free list to its initial empty condition.
  void resetIndexedFreeListArray();
  // Reset to an initial state with a single free block described
  // by the MemRegion parameter.
  void reset(MemRegion mr);
  // Return the total number of words in the indexed free lists.
  size_t     totalSizeInIndexedFreeLists() const;

 public:
  // Constructor
  CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr);
  // Accessors
  bool bestFitFirst() { return _fitStrategy == FreeBlockBestFitFirst; }
  AFLBinaryTreeDictionary* dictionary() const { return _dictionary; }
  HeapWord* nearLargestChunk() const { return _nearLargestChunk; }
  void set_nearLargestChunk(HeapWord* v) { _nearLargestChunk = v; }

  // Set CMS global values.
  static void set_cms_values();

  // Return the free chunk at the end of the space.  If no such
  // chunk exists, return NULL.
  FreeChunk* find_chunk_at_end();

  void set_collector(CMSCollector* collector) { _collector = collector; }

  // Support for parallelization of rescan and marking.
  const size_t rescan_task_size()  const { return _rescan_task_size;  }
  const size_t marking_task_size() const { return _marking_task_size; }
  // Return ergonomic max size for CMSRescanMultiple and CMSConcMarkMultiple.
  const size_t max_flag_size_for_task_size() const;
  SequentialSubTasksDone* conc_par_seq_tasks() {return &_conc_par_seq_tasks; }
  void initialize_sequential_subtasks_for_rescan(int n_threads);
  void initialize_sequential_subtasks_for_marking(int n_threads,
         HeapWord* low = NULL);

  virtual MemRegionClosure* preconsumptionDirtyCardClosure() const {
    return _preconsumptionDirtyCardClosure;
  }

  void setPreconsumptionDirtyCardClosure(MemRegionClosure* cl) {
    _preconsumptionDirtyCardClosure = cl;
  }

  // Space enquiries
  size_t used() const;
  size_t free() const;
  size_t max_alloc_in_words() const;
  // XXX: should have a less conservative used_region() than that of
  // Space; we could consider keeping track of highest allocated
  // address and correcting that at each sweep, as the sweeper
  // goes through the entire allocated part of the generation. We
  // could also use that information to keep the sweeper from
  // sweeping more than is necessary. The allocator and sweeper will
  // of course need to synchronize on this, since the sweeper will
  // try to bump down the address and the allocator will try to bump it up.
  // For now, however, we'll just use the default used_region()
  // which overestimates the region by returning the entire
  // committed region (this is safe, but inefficient).

  // Returns a subregion of the space containing all the objects in
  // the space.
  MemRegion used_region() const {
    return MemRegion(bottom(),
                     BlockOffsetArrayUseUnallocatedBlock ?
                     unallocated_block() : end());
  }

  virtual bool is_free_block(const HeapWord* p) const;

  // Resizing support
  void set_end(HeapWord* value);  // override

  // Never mangle CompactibleFreeListSpace
  void mangle_unused_area() {}
  void mangle_unused_area_complete() {}

  // Mutual exclusion support
  Mutex* freelistLock() const { return &_freelistLock; }

  // Iteration support
  void oop_iterate(ExtendedOopClosure* cl);

  void object_iterate(ObjectClosure* blk);
  // Apply the closure to each object in the space whose references
  // point to objects in the heap.  The usage of CompactibleFreeListSpace
  // by the ConcurrentMarkSweepGeneration for concurrent GCs allows
  // objects in the space with references to objects that are no longer
  // valid.  For example, an object may reference another object
  // that has already been swept up (collected).  This method uses
  // obj_is_alive() to determine whether it is safe to iterate over
  // an object.
  void safe_object_iterate(ObjectClosure* blk);

  // Iterate over all objects that intersect with mr, calling "cl->do_object"
  // on each.  There is an exception to this: if this closure has already
  // been invoked on an object, it may skip such objects in some cases.  This
  // is most likely to happen in an "upwards" (ascending address) iteration of
  // MemRegions.
  void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);

  // Requires that "mr" be entirely within the space.
  // Apply "cl->do_object" to all objects that intersect with "mr".
  // If the iteration encounters an unparseable portion of the region,
  // terminate the iteration and return the address of the start of the
  // subregion that isn't done.  Return of "NULL" indicates that the
  // iteration completed.
  HeapWord* object_iterate_careful_m(MemRegion mr,
                                     ObjectClosureCareful* cl);

  // Override: provides a DCTO_CL specific to this kind of space.
  DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
                                     CardTableModRefBS::PrecisionStyle precision,
                                     HeapWord* boundary,
                                     bool parallel);

  void blk_iterate(BlkClosure* cl);
  void blk_iterate_careful(BlkClosureCareful* cl);
  HeapWord* block_start_const(const void* p) const;
  HeapWord* block_start_careful(const void* p) const;
  size_t block_size(const HeapWord* p) const;
  size_t block_size_no_stall(HeapWord* p, const CMSCollector* c) const;
  bool block_is_obj(const HeapWord* p) const;
  bool obj_is_alive(const HeapWord* p) const;
  size_t block_size_nopar(const HeapWord* p) const;
  bool block_is_obj_nopar(const HeapWord* p) const;

  // Iteration support for promotion
  void save_marks();
  bool no_allocs_since_save_marks();

  // Iteration support for sweeping
  void save_sweep_limit() {
    _sweep_limit = BlockOffsetArrayUseUnallocatedBlock ?
                   unallocated_block() : end();
    log_develop_trace(gc, sweep)(">>>>> Saving sweep limit " PTR_FORMAT
                                 "  for space [" PTR_FORMAT "," PTR_FORMAT ") <<<<<<",
                                 p2i(_sweep_limit), p2i(bottom()), p2i(end()));
  }
  NOT_PRODUCT(
    void clear_sweep_limit() { _sweep_limit = NULL; }
  )
  HeapWord* sweep_limit() { return _sweep_limit; }

  // Apply "blk->do_oop" to the addresses of all reference fields in objects
  // promoted into this generation since the most recent save_marks() call.
  // Fields in objects allocated by applications of the closure
  // *are* included in the iteration. Thus, when the iteration completes
  // there should be no further such objects remaining.
  #define CFLS_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
    void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk);
  ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DECL)
  #undef CFLS_OOP_SINCE_SAVE_MARKS_DECL

  // Allocation support
  HeapWord* allocate(size_t size);
  HeapWord* par_allocate(size_t size);

  oop       promote(oop obj, size_t obj_size);
  void      gc_prologue();
  void      gc_epilogue();

  // This call is used by a containing CMS generation / collector
  // to inform the CFLS space that a sweep has been completed
  // and that the space can do any related house-keeping functions.
  void      sweep_completed();

  // For an object in this space, the mark-word's two
  // LSB's having the value [11] indicates that it has been
  // promoted since the most recent call to save_marks() on
  // this generation and has not subsequently been iterated
  // over (using oop_since_save_marks_iterate() above).
  // This property holds only for single-threaded collections,
  // and is typically used for Cheney scans; for MT scavenges,
  // the property holds for all objects promoted during that
  // scavenge for the duration of the scavenge and is used
  // by card-scanning to avoid scanning objects (being) promoted
  // during that scavenge.
  bool obj_allocated_since_save_marks(const oop obj) const {
    assert(is_in_reserved(obj), "Wrong space?");
    return ((PromotedObject*)obj)->hasPromotedMark();
  }

  // A worst-case estimate of the space required (in HeapWords) to expand the
  // heap when promoting an obj of size obj_size.
  size_t expansionSpaceRequired(size_t obj_size) const;

  FreeChunk* allocateScratch(size_t size);

  // Returns true if either the small or large linear allocation buffer is empty.
  bool       linearAllocationWouldFail() const;

  // Adjust the chunk for the minimum size.  This version is called in
  // most cases in CompactibleFreeListSpace methods.
  inline static size_t adjustObjectSize(size_t size) {
    return align_object_size(MAX2(size, (size_t)MinChunkSize));
  }
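  // Illustrative example (values are platform-dependent): if MinChunkSize
  // were 2 heap words, adjustObjectSize(1) would pad the request up to 2
  // words and align_object_size() would then round it to the minimum object
  // alignment; the intent is that no block smaller than a legal free chunk
  // is ever carved out of the space.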
  // This is a virtual version of adjustObjectSize() that is called
  // only occasionally when the compaction space changes and the type
  // of the new compaction space is only known to be CompactibleSpace.
  size_t adjust_object_size_v(size_t size) const {
    return adjustObjectSize(size);
  }
  // Minimum size of a free block.
  virtual size_t minimum_free_block_size() const { return MinChunkSize; }
  void      removeFreeChunkFromFreeLists(FreeChunk* chunk);
  void      addChunkAndRepairOffsetTable(HeapWord* chunk, size_t size,
              bool coalesced);

  // Support for compaction.
  void prepare_for_compaction(CompactPoint* cp);
  void adjust_pointers();
  void compact();
  // Reset the space to reflect the fact that a compaction of the
  // space has been done.
  virtual void reset_after_compaction();

  // Debugging support.
  void print()                            const;
  void print_on(outputStream* st)         const;
  void prepare_for_verify();
  void verify()                           const;
  void verifyFreeLists()                  const PRODUCT_RETURN;
  void verifyIndexedFreeLists()           const;
  void verifyIndexedFreeList(size_t size) const;
  // Verify that the given chunk is in the free lists:
  // i.e. either the binary tree dictionary, the indexed free lists
  // or the linear allocation block.
  bool verify_chunk_in_free_list(FreeChunk* fc) const;
  // Verify that the given chunk is the linear allocation block.
  bool verify_chunk_is_linear_alloc_block(FreeChunk* fc) const;
  // Do some basic checks on the free lists.
  void check_free_list_consistency()      const PRODUCT_RETURN;

  // Printing support
  void dump_at_safepoint_with_locks(CMSCollector* c, outputStream* st);
  void print_indexed_free_lists(outputStream* st) const;
  void print_dictionary_free_lists(outputStream* st) const;
  void print_promo_info_blocks(outputStream* st) const;

  NOT_PRODUCT (
    void initializeIndexedFreeListArrayReturnedBytes();
    size_t sumIndexedFreeListArrayReturnedBytes();
    // Return the total number of chunks in the indexed free lists.
    size_t totalCountInIndexedFreeLists() const;
    // Return the total number of chunks in the space.
    size_t totalCount();
  )

  // The census consists of counts of quantities such as
  // the current count of the free chunks, the number of chunks
  // created as a result of the split of a larger chunk or
  // coalescing of smaller chunks, etc.  The counts in the
  // census are used to make decisions on splitting and
  // coalescing of chunks during the sweep of garbage.

  // Print the statistics for the free lists.
  void printFLCensus(size_t sweep_count) const;

  // Statistics functions
  // Initialize census for lists before the sweep.
  void beginSweepFLCensus(float inter_sweep_current,
                          float inter_sweep_estimate,
                          float intra_sweep_estimate);
  // Set the surplus for each of the free lists.
  void setFLSurplus();
  // Set the hint for each of the free lists.
  void setFLHints();
  // Clear the census for each of the free lists.
  void clearFLCensus();
  // Perform functions for the census after the end of the sweep.
  void endSweepFLCensus(size_t sweep_count);
  // Return true if the count of free chunks is greater
  // than the desired number of free chunks.
  bool coalOverPopulated(size_t size);

// Record (for each size):
//
//   split-births = #chunks added due to splits in (prev-sweep-end,
//      this-sweep-start)
//   split-deaths = #chunks removed for splits in (prev-sweep-end,
//      this-sweep-start)
//   num-curr     = #chunks at start of this sweep
//   num-prev     = #chunks at end of previous sweep
//
// The above are quantities that are measured. Now define:
//
//   num-desired := num-prev + split-births - split-deaths - num-curr
//
// Roughly, num-prev + split-births is the supply,
// split-deaths is demand due to other sizes
// and num-curr is what we have left.
//
// Thus, num-desired is roughly speaking the "legitimate demand"
// for blocks of this size and what we are striving to reach at the
// end of the current sweep.
//
// For a given list, let num-len be its current population.
// Define, for a free list of a given size:
//
//   coal-overpopulated := num-len >= num-desired * coal-surplus
// (coal-surplus is set to 1.05, i.e. we allow a little slop when
// coalescing -- we do not coalesce unless we think that the current
// supply has exceeded the estimated demand by more than 5%).
//
// For the set of sizes in the binary tree, which is neither dense nor
// closed, it may be the case that for a particular size we have never
// had, or do not now have, or did not have at the previous sweep,
// chunks of that size. We need to extend the definition of
// coal-overpopulated to such sizes as well:
//
//   For a chunk in/not in the binary tree, extend coal-overpopulated
//   defined above to include all sizes as follows:
//
//   . a size that is non-existent is coal-overpopulated
//   . a size that has a num-desired <= 0 as defined above is
//     coal-overpopulated.
//
// Also define, for a chunk heap-offset C and mountain heap-offset M:
//
//   close-to-mountain := C >= 0.99 * M
//
// Now, the coalescing strategy is:
//
//    Coalesce left-hand chunk with right-hand chunk if and
//    only if:
//
//      EITHER
//        . left-hand chunk is of a size that is coal-overpopulated
//      OR
//        . right-hand chunk is close-to-mountain
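//
// A worked example with illustrative numbers (not taken from any actual
// run): suppose a size class ended the previous sweep with num-prev = 100
// chunks, has seen split-births = 30 and split-deaths = 20 since then, and
// currently holds num-curr = 90 chunks.  Then
//
//   num-desired = 100 + 30 - 20 - 90 = 20
//
// and with coal-surplus = 1.05 the list is considered coal-overpopulated
// once its population num-len reaches 20 * 1.05 = 21 chunks, at which point
// left-hand chunks of this size become candidates for coalescing.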
  void smallCoalBirth(size_t size);
  void smallCoalDeath(size_t size);
  void coalBirth(size_t size);
  void coalDeath(size_t size);
  void smallSplitBirth(size_t size);
  void smallSplitDeath(size_t size);
  void split_birth(size_t size);
  void splitDeath(size_t size);
  void split(size_t from, size_t to1);

  double flsFrag() const;
};

// A parallel-GC-thread-local allocation buffer for allocation into a
// CompactibleFreeListSpace.
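//
// Sketch of the expected life cycle (a summary of the declarations below,
// not a normative contract): each parallel GC worker owns one of these
// buffers; alloc() serves promotion requests from the thread-local
// _indexedFreeList entries, refilling a list from the shared space via
// get_from_global_pool() when it runs dry, and retire(tid) returns any
// unused chunks to the space at the end of the collection.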
class CompactibleFreeListSpaceLAB : public CHeapObj<mtGC> {
  // The space that this buffer allocates into.
  CompactibleFreeListSpace* _cfls;

  // Our local free lists.
  AdaptiveFreeList<FreeChunk> _indexedFreeList[CompactibleFreeListSpace::IndexSetSize];

  // Initialized from a command-line arg.

  // Allocation statistics in support of dynamic adjustment of
  // #blocks to claim per get_from_global_pool() call below.
  static AdaptiveWeightedAverage
                 _blocks_to_claim  [CompactibleFreeListSpace::IndexSetSize];
  static size_t _global_num_blocks [CompactibleFreeListSpace::IndexSetSize];
  static uint   _global_num_workers[CompactibleFreeListSpace::IndexSetSize];
  size_t        _num_blocks        [CompactibleFreeListSpace::IndexSetSize];

  // Internal work method
  void get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl);

public:
  static const int _default_dynamic_old_plab_size = 16;
  static const int _default_static_old_plab_size  = 50;

  CompactibleFreeListSpaceLAB(CompactibleFreeListSpace* cfls);

  // Allocate and return a block of the given size, or else return NULL.
  HeapWord* alloc(size_t word_sz);

  // Return any unused portions of the buffer to the global pool.
  void retire(int tid);

  // Dynamic OldPLABSize sizing
  static void compute_desired_plab_size();
  // When the settings are modified from default static initialization
  static void modify_initialization(size_t n, unsigned wt);
};

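// refillSize() below sizes a refill request for the promotion-info spooling
// area: room for one SpoolBlock header plus CMSSpoolBlockSize saved mark
// words, rounded up to a legal chunk size by adjustObjectSize().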
size_t PromotionInfo::refillSize() const {
  const size_t CMSSpoolBlockSize = 256;
  const size_t sz = heap_word_size(sizeof(SpoolBlock) + sizeof(markOop)
                                   * CMSSpoolBlockSize);
  return CompactibleFreeListSpace::adjustObjectSize(sz);
}

#endif // SHARE_VM_GC_CMS_COMPACTIBLEFREELISTSPACE_HPP