metaspace.cpp revision 13242:fcb4803050e8
/*
 * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "aot/aotLoader.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/gcLocker.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/binaryTreeDictionary.hpp"
#include "memory/filemap.hpp"
#include "memory/freeList.hpp"
#include "memory/metachunk.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceGCThresholdUpdater.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/metaspaceTracer.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/mutex.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "services/memTracker.hpp"
#include "services/memoryService.hpp"
#include "utilities/copy.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary;
typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;

// Set this constant to enable slow integrity checking of the free chunk lists
const bool metaspace_slow_verify = false;

size_t const allocation_from_dictionary_limit = 4 * K;

MetaWord* last_allocated = 0;

size_t Metaspace::_compressed_class_space_size;
const MetaspaceTracer* Metaspace::_tracer = NULL;

// Used in declarations in SpaceManager and ChunkManager
enum ChunkIndex {
  ZeroIndex = 0,
  SpecializedIndex = ZeroIndex,
  SmallIndex = SpecializedIndex + 1,
  MediumIndex = SmallIndex + 1,
  HumongousIndex = MediumIndex + 1,
  NumberOfFreeLists = 3,
  NumberOfInUseLists = 4
};

// Helper, returns a descriptive name for the given index.
static const char* chunk_size_name(ChunkIndex index) {
  switch (index) {
    case SpecializedIndex:
      return "specialized";
    case SmallIndex:
      return "small";
    case MediumIndex:
      return "medium";
    case HumongousIndex:
      return "humongous";
    default:
      return "Invalid index";
  }
}

enum ChunkSizes {    // in words.
  ClassSpecializedChunk = 128,
  SpecializedChunk = 128,
  ClassSmallChunk = 256,
  SmallChunk = 512,
  ClassMediumChunk = 4 * K,
  MediumChunk = 8 * K
};
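
// Arithmetic illustration (assuming a 64-bit VM, i.e. BytesPerWord == 8):
// a SpecializedChunk of 128 words spans 1 KB, a SmallChunk of 512 words
// spans 4 KB, and a MediumChunk of 8 * K words spans 64 KB. The class-space
// variants ClassSmallChunk (256 words) and ClassMediumChunk (4 * K words)
// span 2 KB and 32 KB respectively.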

static ChunkIndex next_chunk_index(ChunkIndex i) {
  assert(i < NumberOfInUseLists, "Out of bound");
  return (ChunkIndex) (i+1);
}

volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
uint MetaspaceGC::_shrink_factor = 0;
bool MetaspaceGC::_should_concurrent_collect = false;

typedef class FreeList<Metachunk> ChunkList;

// Manages the global free lists of chunks.
class ChunkManager : public CHeapObj<mtInternal> {
  friend class TestVirtualSpaceNodeTest;

  // Free list of chunks of different sizes.
  //   SpecializedChunk
  //   SmallChunk
  //   MediumChunk
  ChunkList _free_chunks[NumberOfFreeLists];

  // Return non-humongous chunk list by its index.
  ChunkList* free_chunks(ChunkIndex index);

  // Returns non-humongous chunk list for the given chunk word size.
  ChunkList* find_free_chunks_list(size_t word_size);

  //   HumongousChunk
  ChunkTreeDictionary _humongous_dictionary;

  // Returns the humongous chunk dictionary.
  ChunkTreeDictionary* humongous_dictionary() {
    return &_humongous_dictionary;
  }

  // Size, in metaspace words, of all chunks managed by this ChunkManager
  size_t _free_chunks_total;
  // Number of chunks in this ChunkManager
  size_t _free_chunks_count;

  // Update counters after a chunk was added or removed.
  void account_for_added_chunk(const Metachunk* c);
  void account_for_removed_chunk(const Metachunk* c);

  // Debug support

  size_t sum_free_chunks();
  size_t sum_free_chunks_count();

  void locked_verify_free_chunks_total();
  void slow_locked_verify_free_chunks_total() {
    if (metaspace_slow_verify) {
      locked_verify_free_chunks_total();
    }
  }
  void locked_verify_free_chunks_count();
  void slow_locked_verify_free_chunks_count() {
    if (metaspace_slow_verify) {
      locked_verify_free_chunks_count();
    }
  }
  void verify_free_chunks_count();

 public:

  ChunkManager(size_t specialized_size, size_t small_size, size_t medium_size)
      : _free_chunks_total(0), _free_chunks_count(0) {
    _free_chunks[SpecializedIndex].set_size(specialized_size);
    _free_chunks[SmallIndex].set_size(small_size);
    _free_chunks[MediumIndex].set_size(medium_size);
  }

  // Add or delete (return) a chunk to the global freelist.
  Metachunk* chunk_freelist_allocate(size_t word_size);

  // Map a size to a list index assuming that there are lists
  // for special, small, medium, and humongous chunks.
  ChunkIndex list_index(size_t size);

  // Map a given index to the chunk size.
  size_t size_by_index(ChunkIndex index);

  // Take a chunk from the ChunkManager. The chunk is expected to be in
  // the chunk manager (the freelist if non-humongous, the dictionary if
  // humongous).
  void remove_chunk(Metachunk* chunk);

  // Return a single chunk of type index to the ChunkManager.
  void return_single_chunk(ChunkIndex index, Metachunk* chunk);

  // Add the simple linked list of chunks to the freelist of chunks
  // of type index.
  void return_chunk_list(ChunkIndex index, Metachunk* chunk);

  // Total of the space in the free chunks list
  size_t free_chunks_total_words();
  size_t free_chunks_total_bytes();

  // Number of chunks in the free chunks list
  size_t free_chunks_count();

  // Remove from a list by size.  Selects list based on size of chunk.
  Metachunk* free_chunks_get(size_t chunk_word_size);

#define index_bounds_check(index)                                         \
  assert(index == SpecializedIndex ||                                     \
         index == SmallIndex ||                                           \
         index == MediumIndex ||                                          \
         index == HumongousIndex, "Bad index: %d", (int) index)

  size_t num_free_chunks(ChunkIndex index) const {
    index_bounds_check(index);

    if (index == HumongousIndex) {
      return _humongous_dictionary.total_free_blocks();
    }

    ssize_t count = _free_chunks[index].count();
    return count == -1 ? 0 : (size_t) count;
  }

  size_t size_free_chunks_in_bytes(ChunkIndex index) const {
    index_bounds_check(index);

    size_t word_size = 0;
    if (index == HumongousIndex) {
      word_size = _humongous_dictionary.total_size();
    } else {
      const size_t size_per_chunk_in_words = _free_chunks[index].size();
      word_size = size_per_chunk_in_words * num_free_chunks(index);
    }

    return word_size * BytesPerWord;
  }

  MetaspaceChunkFreeListSummary chunk_free_list_summary() const {
    return MetaspaceChunkFreeListSummary(num_free_chunks(SpecializedIndex),
                                         num_free_chunks(SmallIndex),
                                         num_free_chunks(MediumIndex),
                                         num_free_chunks(HumongousIndex),
                                         size_free_chunks_in_bytes(SpecializedIndex),
                                         size_free_chunks_in_bytes(SmallIndex),
                                         size_free_chunks_in_bytes(MediumIndex),
                                         size_free_chunks_in_bytes(HumongousIndex));
  }

  // Debug support
  void verify();
  void slow_verify() {
    if (metaspace_slow_verify) {
      verify();
    }
  }
  void locked_verify();
  void slow_locked_verify() {
    if (metaspace_slow_verify) {
      locked_verify();
    }
  }
  void verify_free_chunks_total();

  void locked_print_free_chunks(outputStream* st);
  void locked_print_sum_free_chunks(outputStream* st);

  void print_on(outputStream* st) const;
};

class SmallBlocks : public CHeapObj<mtClass> {
  const static uint _small_block_max_size = sizeof(TreeChunk<Metablock,  FreeList<Metablock> >)/HeapWordSize;
  const static uint _small_block_min_size = sizeof(Metablock)/HeapWordSize;

 private:
  FreeList<Metablock> _small_lists[_small_block_max_size - _small_block_min_size];

  FreeList<Metablock>& list_at(size_t word_size) {
    assert(word_size >= _small_block_min_size, "There are no metaspace objects less than %u words", _small_block_min_size);
    return _small_lists[word_size - _small_block_min_size];
  }

 public:
  SmallBlocks() {
    for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
      uint k = i - _small_block_min_size;
      _small_lists[k].set_size(i);
    }
  }

  size_t total_size() const {
    size_t result = 0;
    for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
      uint k = i - _small_block_min_size;
      result = result + _small_lists[k].count() * _small_lists[k].size();
    }
    return result;
  }

  static uint small_block_max_size() { return _small_block_max_size; }
  static uint small_block_min_size() { return _small_block_min_size; }

  MetaWord* get_block(size_t word_size) {
    if (list_at(word_size).count() > 0) {
      MetaWord* new_block = (MetaWord*) list_at(word_size).get_chunk_at_head();
      return new_block;
    } else {
      return NULL;
    }
  }
  void return_block(Metablock* free_chunk, size_t word_size) {
    list_at(word_size).return_chunk_at_head(free_chunk, false);
    assert(list_at(word_size).count() > 0, "Should have a chunk");
  }

  void print_on(outputStream* st) const {
    st->print_cr("SmallBlocks:");
    for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
      uint k = i - _small_block_min_size;
      st->print_cr("small_lists size " SIZE_FORMAT " count " SIZE_FORMAT, _small_lists[k].size(), _small_lists[k].count());
    }
  }
};
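
// Illustration of the list_at() mapping above: if, say, _small_block_min_size
// is 3 words, a 5-word block lives in _small_lists[5 - 3], whose set_size()
// is 5; get_block(5) pops from that same list or returns NULL when it is
// empty. The exact bounds depend on sizeof(Metablock) and
// sizeof(TreeChunk<...>) on the platform.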

// Used to manage the free list of Metablocks (a block corresponds
// to the allocation of a quantum of metadata).
class BlockFreelist : public CHeapObj<mtClass> {
  BlockTreeDictionary* const _dictionary;
  SmallBlocks* _small_blocks;

  // Only allocate and split from freelist if the size of the allocation
  // is at least 1/4th the size of the available block.
  const static int WasteMultiplier = 4;

  // Accessors
  BlockTreeDictionary* dictionary() const { return _dictionary; }
  SmallBlocks* small_blocks() {
    if (_small_blocks == NULL) {
      _small_blocks = new SmallBlocks();
    }
    return _small_blocks;
  }

 public:
  BlockFreelist();
  ~BlockFreelist();

  // Get and return a block to the free list
  MetaWord* get_block(size_t word_size);
  void return_block(MetaWord* p, size_t word_size);

  size_t total_size() const  {
    size_t result = dictionary()->total_size();
    if (_small_blocks != NULL) {
      result = result + _small_blocks->total_size();
    }
    return result;
  }

  static size_t min_dictionary_size()   { return TreeChunk<Metablock, FreeList<Metablock> >::min_size(); }
  void print_on(outputStream* st) const;
};

// A VirtualSpaceList node.
class VirtualSpaceNode : public CHeapObj<mtClass> {
  friend class VirtualSpaceList;

  // Link to next VirtualSpaceNode
  VirtualSpaceNode* _next;

  // total in the VirtualSpace
  MemRegion _reserved;
  ReservedSpace _rs;
  VirtualSpace _virtual_space;
  MetaWord* _top;
  // count of chunks contained in this VirtualSpace
  uintx _container_count;

  // Convenience functions to access the _virtual_space
  char* low()  const { return virtual_space()->low(); }
  char* high() const { return virtual_space()->high(); }

  // The first Metachunk will be allocated at the bottom of the
  // VirtualSpace
  Metachunk* first_chunk() { return (Metachunk*) bottom(); }

  // Committed but unused space in the virtual space
  size_t free_words_in_vs() const;
 public:

  VirtualSpaceNode(size_t byte_size);
  VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
  ~VirtualSpaceNode();

  // Convenience functions for logical bottom and end
  MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
  MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }

  bool contains(const void* ptr) { return ptr >= low() && ptr < high(); }

  size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
  size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }

  bool is_pre_committed() const { return _virtual_space.special(); }

  // address of next available space in _virtual_space;
  // Accessors
  VirtualSpaceNode* next() { return _next; }
  void set_next(VirtualSpaceNode* v) { _next = v; }

  void set_reserved(MemRegion const v) { _reserved = v; }
  void set_top(MetaWord* v) { _top = v; }

  // Accessors
  MemRegion* reserved() { return &_reserved; }
  VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }

  // Returns true if "word_size" is available in the VirtualSpace
  bool is_available(size_t word_size) { return word_size <= pointer_delta(end(), _top, sizeof(MetaWord)); }

  MetaWord* top() const { return _top; }
  void inc_top(size_t word_size) { _top += word_size; }

  uintx container_count() { return _container_count; }
  void inc_container_count();
  void dec_container_count();
#ifdef ASSERT
  uintx container_count_slow();
  void verify_container_count();
#endif

  // used and capacity in this single entry in the list
  size_t used_words_in_vs() const;
  size_t capacity_words_in_vs() const;

  bool initialize();

  // get space from the virtual space
  Metachunk* take_from_committed(size_t chunk_word_size);

  // Allocate a chunk from the virtual space and return it.
  Metachunk* get_chunk_vs(size_t chunk_word_size);

  // Expands/shrinks the committed space in a virtual space.  Delegates
  // to VirtualSpace.
  bool expand_by(size_t min_words, size_t preferred_words);

  // In preparation for deleting this node, remove all the chunks
  // in the node from any freelist.
  void purge(ChunkManager* chunk_manager);

  // If an allocation doesn't fit in the current node a new node is created.
  // Allocate chunks out of the remaining committed space in this node
  // to avoid wasting that memory.
  // This always adds up because all the chunk sizes are multiples of
  // the smallest chunk size.
  void retire(ChunkManager* chunk_manager);

#ifdef ASSERT
  // Debug support
  void mangle();
#endif

  void print_on(outputStream* st) const;
};

#define assert_is_ptr_aligned(ptr, alignment) \
  assert(is_ptr_aligned(ptr, alignment),      \
         PTR_FORMAT " is not aligned to "     \
         SIZE_FORMAT, p2i(ptr), alignment)

#define assert_is_size_aligned(size, alignment) \
  assert(is_size_aligned(size, alignment),      \
         SIZE_FORMAT " is not aligned to "      \
         SIZE_FORMAT, size, alignment)


// Decide if large pages should be committed when the memory is reserved.
static bool should_commit_large_pages_when_reserving(size_t bytes) {
  if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
    size_t words = bytes / BytesPerWord;
    bool is_class = false; // We never reserve large pages for the class space.
    if (MetaspaceGC::can_expand(words, is_class) &&
        MetaspaceGC::allowed_expansion() >= words) {
      return true;
    }
  }

  return false;
}

// bytes is the size of the associated VirtualSpace.
VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
  assert_is_size_aligned(bytes, Metaspace::reserve_alignment());

#if INCLUDE_CDS
  // This allocates memory with mmap.  For DumpSharedSpaces, try to reserve
  // at a configurable address, generally at the top of the Java heap so other
  // memory addresses don't conflict.
  if (DumpSharedSpaces) {
    bool large_pages = false; // No large pages when dumping the CDS archive.
    char* shared_base = align_ptr_up((char*)SharedBaseAddress, Metaspace::reserve_alignment());

    _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base);
    if (_rs.is_reserved()) {
      assert(shared_base == 0 || _rs.base() == shared_base, "should match");
    } else {
      // Get a mmap region anywhere if the SharedBaseAddress fails.
      _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
    }
    if (!_rs.is_reserved()) {
      vm_exit_during_initialization("Unable to allocate memory for shared space",
        err_msg(SIZE_FORMAT " bytes.", bytes));
    }
    MetaspaceShared::initialize_shared_rs(&_rs);
  } else
#endif
  {
    bool large_pages = should_commit_large_pages_when_reserving(bytes);

    _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
  }

  if (_rs.is_reserved()) {
    assert(_rs.base() != NULL, "Catch if we get a NULL address");
    assert(_rs.size() != 0, "Catch if we get a 0 size");
    assert_is_ptr_aligned(_rs.base(), Metaspace::reserve_alignment());
    assert_is_size_aligned(_rs.size(), Metaspace::reserve_alignment());

    MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
  }
}

void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk) {
    assert(chunk->is_tagged_free(), "Should be tagged free");
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    chunk_manager->remove_chunk(chunk);
    assert(chunk->next() == NULL &&
           chunk->prev() == NULL,
           "Was not removed from its list");
    chunk = (Metachunk*) next;
  }
}

#ifdef ASSERT
uintx VirtualSpaceNode::container_count_slow() {
  uintx count = 0;
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk) {
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    // Don't count the chunks on the free lists.  Those are
    // still part of the VirtualSpaceNode but not currently
    // counted.
    if (!chunk->is_tagged_free()) {
      count++;
    }
    chunk = (Metachunk*) next;
  }
  return count;
}
#endif

// List of VirtualSpaces for metadata allocation.
class VirtualSpaceList : public CHeapObj<mtClass> {
  friend class VirtualSpaceNode;

  enum VirtualSpaceSizes {
    VirtualSpaceSize = 256 * K
  };

  // Head of the list
  VirtualSpaceNode* _virtual_space_list;
  // virtual space currently being used for allocations
  VirtualSpaceNode* _current_virtual_space;

  // Is this VirtualSpaceList used for the compressed class space
  bool _is_class;

  // Sum of reserved and committed memory in the virtual spaces
  size_t _reserved_words;
  size_t _committed_words;

  // Number of virtual spaces
  size_t _virtual_space_count;

  ~VirtualSpaceList();

  VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }

  void set_virtual_space_list(VirtualSpaceNode* v) {
    _virtual_space_list = v;
  }
  void set_current_virtual_space(VirtualSpaceNode* v) {
    _current_virtual_space = v;
  }

  void link_vs(VirtualSpaceNode* new_entry);

  // Get another virtual space and add it to the list.  This
  // is typically prompted by a failed attempt to allocate a chunk
  // and is typically followed by the allocation of a chunk.
  bool create_new_virtual_space(size_t vs_word_size);

  // Chunk up the unused committed space in the current
  // virtual space and add the chunks to the free list.
  void retire_current_virtual_space();

 public:
  VirtualSpaceList(size_t word_size);
  VirtualSpaceList(ReservedSpace rs);

  size_t free_bytes();

  Metachunk* get_new_chunk(size_t chunk_word_size,
                           size_t suggested_commit_granularity);

  bool expand_node_by(VirtualSpaceNode* node,
                      size_t min_words,
                      size_t preferred_words);

  bool expand_by(size_t min_words,
                 size_t preferred_words);

  VirtualSpaceNode* current_virtual_space() {
    return _current_virtual_space;
  }

  bool is_class() const { return _is_class; }

  bool initialization_succeeded() { return _virtual_space_list != NULL; }

  size_t reserved_words()  { return _reserved_words; }
  size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
  size_t committed_words() { return _committed_words; }
  size_t committed_bytes() { return committed_words() * BytesPerWord; }

  void inc_reserved_words(size_t v);
  void dec_reserved_words(size_t v);
  void inc_committed_words(size_t v);
  void dec_committed_words(size_t v);
  void inc_virtual_space_count();
  void dec_virtual_space_count();

  bool contains(const void* ptr);

  // Unlink empty VirtualSpaceNodes and free them.
  void purge(ChunkManager* chunk_manager);

  void print_on(outputStream* st) const;

  class VirtualSpaceListIterator : public StackObj {
    VirtualSpaceNode* _virtual_spaces;
   public:
    VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
      _virtual_spaces(virtual_spaces) {}

    bool repeat() {
      return _virtual_spaces != NULL;
    }

    VirtualSpaceNode* get_next() {
      VirtualSpaceNode* result = _virtual_spaces;
      if (_virtual_spaces != NULL) {
        _virtual_spaces = _virtual_spaces->next();
      }
      return result;
    }
  };
};

class Metadebug : AllStatic {
  // Debugging support for Metaspaces
  static int _allocation_fail_alot_count;

 public:

  static void init_allocation_fail_alot_count();
#ifdef ASSERT
  static bool test_metadata_failure();
#endif
};

int Metadebug::_allocation_fail_alot_count = 0;

//  SpaceManager - used by Metaspace to handle allocations
class SpaceManager : public CHeapObj<mtClass> {
  friend class Metaspace;
  friend class Metadebug;

 private:

  // protects allocations
  Mutex* const _lock;

  // Type of metadata allocated.
  Metaspace::MetadataType _mdtype;

  // List of chunks in use by this SpaceManager.  Allocations
  // are done from the current chunk.  The list is used for deallocating
  // chunks when the SpaceManager is freed.
  Metachunk* _chunks_in_use[NumberOfInUseLists];
  Metachunk* _current_chunk;

  // Maximum number of small chunks to allocate to a SpaceManager
  static uint const _small_chunk_limit;

  // Sum of all space in allocated chunks
  size_t _allocated_blocks_words;

  // Sum of all allocated chunks
  size_t _allocated_chunks_words;
  size_t _allocated_chunks_count;

  // Free lists of blocks are per SpaceManager since they
  // are assumed to be in chunks in use by the SpaceManager
  // and all chunks in use by a SpaceManager are freed when
  // the class loader using the SpaceManager is collected.
  BlockFreelist* _block_freelists;

  // protects virtualspace and chunk expansions
  static const char*  _expand_lock_name;
  static const int    _expand_lock_rank;
  static Mutex* const _expand_lock;

 private:
  // Accessors
  Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
  void set_chunks_in_use(ChunkIndex index, Metachunk* v) {
    _chunks_in_use[index] = v;
  }

  BlockFreelist* block_freelists() const { return _block_freelists; }

  Metaspace::MetadataType mdtype() { return _mdtype; }

  VirtualSpaceList* vs_list()   const { return Metaspace::get_space_list(_mdtype); }
  ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }

  Metachunk* current_chunk() const { return _current_chunk; }
  void set_current_chunk(Metachunk* v) {
    _current_chunk = v;
  }

  Metachunk* find_current_chunk(size_t word_size);

  // Add chunk to the list of chunks in use
  void add_chunk(Metachunk* v, bool make_current);
  void retire_current_chunk();

  Mutex* lock() const { return _lock; }

 protected:
  void initialize();

 public:
  SpaceManager(Metaspace::MetadataType mdtype,
               Mutex* lock);
  ~SpaceManager();

  enum ChunkMultiples {
    MediumChunkMultiple = 4
  };

  static size_t specialized_chunk_size(bool is_class) { return is_class ? ClassSpecializedChunk : SpecializedChunk; }
  static size_t small_chunk_size(bool is_class)       { return is_class ? ClassSmallChunk : SmallChunk; }
  static size_t medium_chunk_size(bool is_class)      { return is_class ? ClassMediumChunk : MediumChunk; }

  static size_t smallest_chunk_size(bool is_class)    { return specialized_chunk_size(is_class); }

  // Accessors
  bool is_class() const { return _mdtype == Metaspace::ClassType; }

  size_t specialized_chunk_size() const { return specialized_chunk_size(is_class()); }
  size_t small_chunk_size()       const { return small_chunk_size(is_class()); }
  size_t medium_chunk_size()      const { return medium_chunk_size(is_class()); }

  size_t smallest_chunk_size()    const { return smallest_chunk_size(is_class()); }

  size_t medium_chunk_bunch()     const { return medium_chunk_size() * MediumChunkMultiple; }

  size_t allocated_blocks_words() const { return _allocated_blocks_words; }
  size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
  size_t allocated_chunks_words() const { return _allocated_chunks_words; }
  size_t allocated_chunks_bytes() const { return _allocated_chunks_words * BytesPerWord; }
  size_t allocated_chunks_count() const { return _allocated_chunks_count; }

  bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }

  static Mutex* expand_lock() { return _expand_lock; }
792
793  // Increment the per Metaspace and global running sums for Metachunks
794  // by the given size.  This is used when a Metachunk to added to
795  // the in-use list.
796  void inc_size_metrics(size_t words);
797  // Increment the per Metaspace and global running sums Metablocks by the given
798  // size.  This is used when a Metablock is allocated.
799  void inc_used_metrics(size_t words);
800  // Delete the portion of the running sums for this SpaceManager. That is,
801  // the globals running sums for the Metachunks and Metablocks are
802  // decremented for all the Metachunks in-use by this SpaceManager.
803  void dec_total_from_size_metrics();

  // Adjust the initial chunk size to match one of the fixed chunk list sizes,
  // or return the unadjusted size if the requested size is humongous.
  static size_t adjust_initial_chunk_size(size_t requested, bool is_class_space);
  size_t adjust_initial_chunk_size(size_t requested) const;

  // Get the initial chunk size for this metaspace type.
  size_t get_initial_chunk_size(Metaspace::MetaspaceType type) const;

  size_t sum_capacity_in_chunks_in_use() const;
  size_t sum_used_in_chunks_in_use() const;
  size_t sum_free_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use(ChunkIndex index) const;

  size_t sum_count_in_chunks_in_use();
  size_t sum_count_in_chunks_in_use(ChunkIndex i);

  Metachunk* get_new_chunk(size_t chunk_word_size);

  // Block allocation and deallocation.
  // Allocates a block from the current chunk
  MetaWord* allocate(size_t word_size);
  // Allocates a block from a small chunk
  MetaWord* get_small_chunk_and_allocate(size_t word_size);

  // Helper for allocations
  MetaWord* allocate_work(size_t word_size);

  // Returns a block to the per manager freelist
  void deallocate(MetaWord* p, size_t word_size);

  // Based on the allocation size and a minimum chunk size,
  // compute the chunk size to return (for expanding space for chunk allocation).
  size_t calc_chunk_size(size_t allocation_word_size);

  // Called when an allocation from the current chunk fails.
  // Gets a new chunk (may require getting a new virtual space),
  // and allocates from that chunk.
  MetaWord* grow_and_allocate(size_t word_size);

  // Notify memory usage to MemoryService.
  void track_metaspace_memory_usage();

  // debugging support.

  void dump(outputStream* const out) const;
  void print_on(outputStream* st) const;
  void locked_print_chunks_in_use_on(outputStream* st) const;

  void verify();
  void verify_chunk_size(Metachunk* chunk);
#ifdef ASSERT
  void verify_allocated_blocks_words();
#endif

  // This adjusts the size given to be greater than the minimum allocation size in
  // words for data in metaspace.  Essentially the minimum size is currently 3 words.
  size_t get_allocation_word_size(size_t word_size) {
    size_t byte_size = word_size * BytesPerWord;

    size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
    raw_bytes_size = align_size_up(raw_bytes_size, Metachunk::object_alignment());

    size_t raw_word_size = raw_bytes_size / BytesPerWord;
    assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");

    return raw_word_size;
  }
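
  // Worked example for get_allocation_word_size(), assuming a 64-bit VM
  // (BytesPerWord == 8), a 3-word (24-byte) Metablock and an 8-byte
  // Metachunk::object_alignment(): a request for 1 word gives byte_size == 8,
  // MAX2 raises it to 24, alignment leaves it at 24, so 3 words are returned.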
};

uint const SpaceManager::_small_chunk_limit = 4;

const char* SpaceManager::_expand_lock_name =
  "SpaceManager chunk allocation lock";
const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
Mutex* const SpaceManager::_expand_lock =
  new Mutex(SpaceManager::_expand_lock_rank,
            SpaceManager::_expand_lock_name,
            Mutex::_allow_vm_block_flag,
            Monitor::_safepoint_check_never);

void VirtualSpaceNode::inc_container_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _container_count++;
}

void VirtualSpaceNode::dec_container_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _container_count--;
}

#ifdef ASSERT
void VirtualSpaceNode::verify_container_count() {
  assert(_container_count == container_count_slow(),
         "Inconsistency in container_count _container_count " UINTX_FORMAT
         " container_count_slow() " UINTX_FORMAT, _container_count, container_count_slow());
}
#endif

// BlockFreelist methods

BlockFreelist::BlockFreelist() : _dictionary(new BlockTreeDictionary()), _small_blocks(NULL) {}

BlockFreelist::~BlockFreelist() {
  delete _dictionary;
  if (_small_blocks != NULL) {
    delete _small_blocks;
  }
}

void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
  assert(word_size >= SmallBlocks::small_block_min_size(), "never return dark matter");

  Metablock* free_chunk = ::new (p) Metablock(word_size);
  if (word_size < SmallBlocks::small_block_max_size()) {
    small_blocks()->return_block(free_chunk, word_size);
  } else {
    dictionary()->return_chunk(free_chunk);
  }
  log_trace(gc, metaspace, freelist, blocks)("returning block at " INTPTR_FORMAT " size = "
            SIZE_FORMAT, p2i(free_chunk), word_size);
}

MetaWord* BlockFreelist::get_block(size_t word_size) {
  assert(word_size >= SmallBlocks::small_block_min_size(), "never get dark matter");

  // Try small_blocks first.
  if (word_size < SmallBlocks::small_block_max_size()) {
    // Don't create small_blocks() until needed.  small_blocks() allocates the small block list for
    // this space manager.
    MetaWord* new_block = (MetaWord*) small_blocks()->get_block(word_size);
    if (new_block != NULL) {
      log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
              p2i(new_block), word_size);
      return new_block;
    }
  }

  if (word_size < BlockFreelist::min_dictionary_size()) {
    // If allocation in small blocks fails, this is Dark Matter.  Too small for dictionary.
    return NULL;
  }

  Metablock* free_block =
    dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::atLeast);
  if (free_block == NULL) {
    return NULL;
  }

  const size_t block_size = free_block->size();
  if (block_size > WasteMultiplier * word_size) {
    return_block((MetaWord*)free_block, block_size);
    return NULL;
  }

  MetaWord* new_block = (MetaWord*)free_block;
  assert(block_size >= word_size, "Incorrect size of block from freelist");
  const size_t unused = block_size - word_size;
  if (unused >= SmallBlocks::small_block_min_size()) {
    return_block(new_block + word_size, unused);
  }

  log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
            p2i(new_block), word_size);
  return new_block;
}

void BlockFreelist::print_on(outputStream* st) const {
  dictionary()->print_free_lists(st);
  if (_small_blocks != NULL) {
    _small_blocks->print_on(st);
  }
}

// VirtualSpaceNode methods

VirtualSpaceNode::~VirtualSpaceNode() {
  _rs.release();
#ifdef ASSERT
  size_t word_size = sizeof(*this) / BytesPerWord;
  Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
#endif
}

size_t VirtualSpaceNode::used_words_in_vs() const {
  return pointer_delta(top(), bottom(), sizeof(MetaWord));
}

// Space committed in the VirtualSpace
size_t VirtualSpaceNode::capacity_words_in_vs() const {
  return pointer_delta(end(), bottom(), sizeof(MetaWord));
}

size_t VirtualSpaceNode::free_words_in_vs() const {
  return pointer_delta(end(), top(), sizeof(MetaWord));
}

// Allocates the chunk from the virtual space only.
// This interface is also used internally for debugging.  Not all
// chunks removed here are necessarily used for allocation.
Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
  // Bottom of the new chunk
  MetaWord* chunk_limit = top();
  assert(chunk_limit != NULL, "Not safe to call this method");

  // The virtual spaces are always expanded by the
  // commit granularity to enforce the following condition.
  // Without this the is_available check will not work correctly.
  assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
      "The committed memory doesn't match the expanded memory.");

  if (!is_available(chunk_word_size)) {
    Log(gc, metaspace, freelist) log;
    log.debug("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
    // Dump some information about the virtual space that is nearly full
    ResourceMark rm;
    print_on(log.debug_stream());
    return NULL;
  }

  // Take the space (bump top on the current virtual space).
  inc_top(chunk_word_size);

  // Initialize the chunk
  Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this);
  return result;
}


// Expand the virtual space (commit more of the reserved space)
bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
  size_t min_bytes = min_words * BytesPerWord;
  size_t preferred_bytes = preferred_words * BytesPerWord;

  size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();

  if (uncommitted < min_bytes) {
    return false;
  }

  size_t commit = MIN2(preferred_bytes, uncommitted);
  bool result = virtual_space()->expand_by(commit, false);

  assert(result, "Failed to commit memory");

  return result;
}

Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());
  Metachunk* result = take_from_committed(chunk_word_size);
  if (result != NULL) {
    inc_container_count();
  }
  return result;
}

bool VirtualSpaceNode::initialize() {

  if (!_rs.is_reserved()) {
    return false;
  }

  // These are necessary restrictions to make sure that the virtual space always
  // grows in steps of Metaspace::commit_alignment(). If both base and size are
  // aligned, only the middle alignment of the VirtualSpace is used.
  assert_is_ptr_aligned(_rs.base(), Metaspace::commit_alignment());
  assert_is_size_aligned(_rs.size(), Metaspace::commit_alignment());

  // ReservedSpaces marked as special will have the entire memory
  // pre-committed. Setting a committed size will make sure that
  // committed_size and actual_committed_size agree.
  size_t pre_committed_size = _rs.special() ? _rs.size() : 0;

  bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
                                            Metaspace::commit_alignment());
  if (result) {
    assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
        "Checking that the pre-committed memory was registered by the VirtualSpace");

    set_top((MetaWord*)virtual_space()->low());
    set_reserved(MemRegion((HeapWord*)_rs.base(),
                 (HeapWord*)(_rs.base() + _rs.size())));

    assert(reserved()->start() == (HeapWord*) _rs.base(),
           "Reserved start was not set properly " PTR_FORMAT
           " != " PTR_FORMAT, p2i(reserved()->start()), p2i(_rs.base()));
    assert(reserved()->word_size() == _rs.size() / BytesPerWord,
           "Reserved size was not set properly " SIZE_FORMAT
           " != " SIZE_FORMAT, reserved()->word_size(),
           _rs.size() / BytesPerWord);
  }

  return result;
}

void VirtualSpaceNode::print_on(outputStream* st) const {
  size_t used = used_words_in_vs();
  size_t capacity = capacity_words_in_vs();
  VirtualSpace* vs = virtual_space();
  st->print_cr("   space @ " PTR_FORMAT " " SIZE_FORMAT "K, " SIZE_FORMAT_W(3) "%% used "
           "[" PTR_FORMAT ", " PTR_FORMAT ", "
           PTR_FORMAT ", " PTR_FORMAT ")",
           p2i(vs), capacity / K,
           capacity == 0 ? 0 : used * 100 / capacity,
           p2i(bottom()), p2i(top()), p2i(end()),
           p2i(vs->high_boundary()));
}

#ifdef ASSERT
void VirtualSpaceNode::mangle() {
  size_t word_size = capacity_words_in_vs();
  Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
}
#endif // ASSERT

// VirtualSpaceList methods
// Space allocated from the VirtualSpace

VirtualSpaceList::~VirtualSpaceList() {
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsl = iter.get_next();
    delete vsl;
  }
}

void VirtualSpaceList::inc_reserved_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _reserved_words = _reserved_words + v;
}
void VirtualSpaceList::dec_reserved_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _reserved_words = _reserved_words - v;
}

#define assert_committed_below_limit()                        \
  assert(MetaspaceAux::committed_bytes() <= MaxMetaspaceSize, \
         "Too much committed memory. Committed: " SIZE_FORMAT \
         " limit (MaxMetaspaceSize): " SIZE_FORMAT,           \
         MetaspaceAux::committed_bytes(), MaxMetaspaceSize);

void VirtualSpaceList::inc_committed_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _committed_words = _committed_words + v;

  assert_committed_below_limit();
}
void VirtualSpaceList::dec_committed_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _committed_words = _committed_words - v;

  assert_committed_below_limit();
}

void VirtualSpaceList::inc_virtual_space_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _virtual_space_count++;
}
void VirtualSpaceList::dec_virtual_space_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _virtual_space_count--;
}

void ChunkManager::remove_chunk(Metachunk* chunk) {
  size_t word_size = chunk->word_size();
  ChunkIndex index = list_index(word_size);
  if (index != HumongousIndex) {
    free_chunks(index)->remove_chunk(chunk);
  } else {
    humongous_dictionary()->remove_chunk(chunk);
  }

  // Chunk has been removed from the chunks free list, update counters.
  account_for_removed_chunk(chunk);
}

// Walk the list of VirtualSpaceNodes and delete
// nodes with a 0 container_count.  Remove Metachunks in
// the node from their respective freelists.
void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
  assert_lock_strong(SpaceManager::expand_lock());
  // Don't use a VirtualSpaceListIterator because this
  // list is being changed and a straightforward use of an iterator is not safe.
  VirtualSpaceNode* purged_vsl = NULL;
  VirtualSpaceNode* prev_vsl = virtual_space_list();
  VirtualSpaceNode* next_vsl = prev_vsl;
  while (next_vsl != NULL) {
    VirtualSpaceNode* vsl = next_vsl;
    DEBUG_ONLY(vsl->verify_container_count();)
    next_vsl = vsl->next();
    // Don't free the current virtual space since it will likely
    // be needed soon.
    if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
      // Unlink it from the list
      if (prev_vsl == vsl) {
        // This is the case of the current node being the first node.
        assert(vsl == virtual_space_list(), "Expected to be the first node");
        set_virtual_space_list(vsl->next());
      } else {
        prev_vsl->set_next(vsl->next());
      }

      vsl->purge(chunk_manager);
      dec_reserved_words(vsl->reserved_words());
      dec_committed_words(vsl->committed_words());
      dec_virtual_space_count();
      purged_vsl = vsl;
      delete vsl;
    } else {
      prev_vsl = vsl;
    }
  }
#ifdef ASSERT
  if (purged_vsl != NULL) {
    // List should be stable enough to use an iterator here.
    VirtualSpaceListIterator iter(virtual_space_list());
    while (iter.repeat()) {
      VirtualSpaceNode* vsl = iter.get_next();
      assert(vsl != purged_vsl, "Purge of vsl failed");
    }
  }
#endif
}


// This function looks at the mmap regions in the metaspace without locking.
// The chunks are added with store ordering and not deleted except at
// unloading time during a safepoint.
bool VirtualSpaceList::contains(const void* ptr) {
  // List should be stable enough to use an iterator here because removing virtual
  // space nodes is only allowed at a safepoint.
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsn = iter.get_next();
    if (vsn->contains(ptr)) {
      return true;
    }
  }
  return false;
}

void VirtualSpaceList::retire_current_virtual_space() {
  assert_lock_strong(SpaceManager::expand_lock());

  VirtualSpaceNode* vsn = current_virtual_space();

  ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
                                  Metaspace::chunk_manager_metadata();

  vsn->retire(cm);
}

void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
  DEBUG_ONLY(verify_container_count();)
  for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
    ChunkIndex index = (ChunkIndex)i;
    size_t chunk_size = chunk_manager->size_by_index(index);

    while (free_words_in_vs() >= chunk_size) {
      Metachunk* chunk = get_chunk_vs(chunk_size);
      assert(chunk != NULL, "allocation should have been successful");

      chunk_manager->return_single_chunk(index, chunk);
    }
    DEBUG_ONLY(verify_container_count();)
  }
  assert(free_words_in_vs() == 0, "should be empty now");
}

VirtualSpaceList::VirtualSpaceList(size_t word_size) :
                                   _is_class(false),
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _reserved_words(0),
                                   _committed_words(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  create_new_virtual_space(word_size);
}

VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
                                   _is_class(true),
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _reserved_words(0),
                                   _committed_words(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
  bool succeeded = class_entry->initialize();
  if (succeeded) {
    link_vs(class_entry);
  }
}

size_t VirtualSpaceList::free_bytes() {
  return virtual_space_list()->free_words_in_vs() * BytesPerWord;
}

// Allocate another meta virtual space and add it to the list.
bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());

  if (is_class()) {
    assert(false, "We currently don't support more than one VirtualSpace for"
                  " the compressed class space. The initialization of the"
                  " CCS uses another code path and should not hit this path.");
    return false;
  }

  if (vs_word_size == 0) {
    assert(false, "vs_word_size should always be at least _reserve_alignment large.");
    return false;
  }

  // Reserve the space
  size_t vs_byte_size = vs_word_size * BytesPerWord;
  assert_is_size_aligned(vs_byte_size, Metaspace::reserve_alignment());

  // Allocate the meta virtual space and initialize it.
  VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
  if (!new_entry->initialize()) {
    delete new_entry;
    return false;
  } else {
    assert(new_entry->reserved_words() == vs_word_size,
        "Reserved memory size differs from requested memory size");
    // Ensure lock-free iteration sees a fully initialized node.
    OrderAccess::storestore();
    link_vs(new_entry);
    return true;
  }
}

void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
  if (virtual_space_list() == NULL) {
    set_virtual_space_list(new_entry);
  } else {
    current_virtual_space()->set_next(new_entry);
  }
  set_current_virtual_space(new_entry);
  inc_reserved_words(new_entry->reserved_words());
  inc_committed_words(new_entry->committed_words());
  inc_virtual_space_count();
#ifdef ASSERT
  new_entry->mangle();
#endif
  if (log_is_enabled(Trace, gc, metaspace)) {
    Log(gc, metaspace) log;
    VirtualSpaceNode* vsl = current_virtual_space();
    ResourceMark rm;
    vsl->print_on(log.trace_stream());
  }
}

bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
                                      size_t min_words,
                                      size_t preferred_words) {
  size_t before = node->committed_words();

  bool result = node->expand_by(min_words, preferred_words);

  size_t after = node->committed_words();

  // after and before can be the same if the memory was pre-committed.
  assert(after >= before, "Inconsistency");
  inc_committed_words(after - before);

  return result;
}

bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
  assert_is_size_aligned(min_words,       Metaspace::commit_alignment_words());
  assert_is_size_aligned(preferred_words, Metaspace::commit_alignment_words());
  assert(min_words <= preferred_words, "Invalid arguments");

  if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
    return false;
  }

  size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
  if (allowed_expansion_words < min_words) {
    return false;
  }

  size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);

  // Commit more memory from the current virtual space.
  bool vs_expanded = expand_node_by(current_virtual_space(),
                                    min_words,
                                    max_expansion_words);
  if (vs_expanded) {
    return true;
  }
  retire_current_virtual_space();

  // Get another virtual space.
  size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
  grow_vs_words = align_size_up(grow_vs_words, Metaspace::reserve_alignment_words());

  if (create_new_virtual_space(grow_vs_words)) {
    if (current_virtual_space()->is_pre_committed()) {
      // The memory was pre-committed, so we are done here.
      assert(min_words <= current_virtual_space()->committed_words(),
          "The new VirtualSpace was pre-committed, so it"
          " should be large enough to fit the alloc request.");
      return true;
    }

    return expand_node_by(current_virtual_space(),
                          min_words,
                          max_expansion_words);
  }

  return false;
}

Metachunk* VirtualSpaceList::get_new_chunk(size_t chunk_word_size, size_t suggested_commit_granularity) {

  // Allocate a chunk out of the current virtual space.
  Metachunk* next = current_virtual_space()->get_chunk_vs(chunk_word_size);

  if (next != NULL) {
    return next;
  }

  // The expand amount is currently only determined by the requested sizes
  // and not how much committed memory is left in the current virtual space.

  size_t min_word_size       = align_size_up(chunk_word_size,              Metaspace::commit_alignment_words());
  size_t preferred_word_size = align_size_up(suggested_commit_granularity, Metaspace::commit_alignment_words());
  if (min_word_size >= preferred_word_size) {
    // Can happen when humongous chunks are allocated.
    preferred_word_size = min_word_size;
  }

  bool expanded = expand_by(min_word_size, preferred_word_size);
  if (expanded) {
    next = current_virtual_space()->get_chunk_vs(chunk_word_size);
    assert(next != NULL, "The allocation was expected to succeed after the expansion");
  }

  return next;
}

void VirtualSpaceList::print_on(outputStream* st) const {
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* node = iter.get_next();
    node->print_on(st);
  }
}

// MetaspaceGC methods

// VM_CollectForMetadataAllocation is the VM operation used to GC.
// Within the VM operation, after the GC, the attempt to allocate the metadata
// should succeed.  If the GC did not free enough space for the metaspace
// allocation, the HWM is increased so that another virtual space will be
// allocated for the metadata.  With the perm gen, the increase in the perm
// gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
// metaspace policy uses those as the small and large steps for the HWM.
//
// After the GC the compute_new_size() for MetaspaceGC is called to
// resize the capacity of the metaspaces.  The current implementation
// is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
// to resize the Java heap by some GCs.  New flags can be implemented
// if really needed.  MinMetaspaceFreeRatio is used to calculate how much
// free space is desirable in the metaspace capacity to decide how much
// to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
// free space is desirable in the metaspace capacity before decreasing
// the HWM.

// Calculate the amount to increase the high water mark (HWM).
// Increase by a minimum amount (MinMetaspaceExpansion) so that
// another expansion is not requested too soon.  If that is not
// enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
// If that is still not enough, expand by the size of the allocation
// plus some.
size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
  size_t min_delta = MinMetaspaceExpansion;
  size_t max_delta = MaxMetaspaceExpansion;
  size_t delta = align_size_up(bytes, Metaspace::commit_alignment());

  if (delta <= min_delta) {
    delta = min_delta;
  } else if (delta <= max_delta) {
    // Don't want to hit the high water mark on the next
    // allocation so make the delta greater than just enough
    // for this allocation.
    delta = max_delta;
  } else {
    // This allocation is large but the next ones are probably not
    // so increase by the minimum.
    delta = delta + min_delta;
  }

  assert_is_size_aligned(delta, Metaspace::commit_alignment());

  return delta;
}
1510
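// A minimal sketch of the resulting step function (not part of this file;
// assumes MinMetaspaceExpansion = 256K, MaxMetaspaceExpansion = 4M and a
// 64K commit alignment):
//
//   delta_capacity_until_GC(100 * K); // -> 256K (small request: min step)
//   delta_capacity_until_GC(1 * M);   // -> 4M   (medium request: max step)
//   delta_capacity_until_GC(8 * M);   // -> 8M + 256K (large request + min)
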
1511size_t MetaspaceGC::capacity_until_GC() {
1512  size_t value = (size_t)OrderAccess::load_ptr_acquire(&_capacity_until_GC);
1513  assert(value >= MetaspaceSize, "Not initialized properly?");
1514  return value;
1515}
1516
1517bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
1518  assert_is_size_aligned(v, Metaspace::commit_alignment());
1519
1520  size_t capacity_until_GC = (size_t) _capacity_until_GC;
1521  size_t new_value = capacity_until_GC + v;
1522
1523  if (new_value < capacity_until_GC) {
1524    // The addition wrapped around, set new_value to aligned max value.
1525    new_value = align_size_down(max_uintx, Metaspace::commit_alignment());
1526  }
1527
1528  intptr_t expected = (intptr_t) capacity_until_GC;
1529  intptr_t actual = Atomic::cmpxchg_ptr((intptr_t) new_value, &_capacity_until_GC, expected);
1530
1531  if (expected != actual) {
1532    return false;
1533  }
1534
1535  if (new_cap_until_GC != NULL) {
1536    *new_cap_until_GC = new_value;
1537  }
1538  if (old_cap_until_GC != NULL) {
1539    *old_cap_until_GC = capacity_until_GC;
1540  }
1541  return true;
1542}
1543
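// inc_capacity_until_GC() attempts a single compare-and-swap and reports
// failure rather than looping, so a caller that must raise the HWM retries
// itself.  A hypothetical caller sketch (delta and needed_capacity are not
// part of this file):
//
//   size_t new_cap = 0;
//   while (!MetaspaceGC::inc_capacity_until_GC(delta, &new_cap)) {
//     // Lost the race; if the winner already raised the HWM far enough,
//     // there is nothing left to do.
//     if (MetaspaceGC::capacity_until_GC() >= needed_capacity) break;
//   }
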
1544size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
1545  assert_is_size_aligned(v, Metaspace::commit_alignment());
1546
1547  return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC);
1548}
1549
1550void MetaspaceGC::initialize() {
1551  // Set the high-water mark to MaxMetaspaceSize during VM initialization since
1552  // we can't do a GC during initialization.
1553  _capacity_until_GC = MaxMetaspaceSize;
1554}
1555
1556void MetaspaceGC::post_initialize() {
1557  // Reset the high-water mark once the VM initialization is done.
1558  _capacity_until_GC = MAX2(MetaspaceAux::committed_bytes(), MetaspaceSize);
1559}
1560
1561bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
1562  // Check if the compressed class space is full.
1563  if (is_class && Metaspace::using_class_space()) {
1564    size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
1565    if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
1566      return false;
1567    }
1568  }
1569
1570  // Check if the user has imposed a limit on the metaspace memory.
1571  size_t committed_bytes = MetaspaceAux::committed_bytes();
1572  if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
1573    return false;
1574  }
1575
1576  return true;
1577}
1578
1579size_t MetaspaceGC::allowed_expansion() {
1580  size_t committed_bytes = MetaspaceAux::committed_bytes();
1581  size_t capacity_until_gc = capacity_until_GC();
1582
1583  assert(capacity_until_gc >= committed_bytes,
1584         "capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
1585         capacity_until_gc, committed_bytes);
1586
1587  size_t left_until_max  = MaxMetaspaceSize - committed_bytes;
1588  size_t left_until_GC = capacity_until_gc - committed_bytes;
1589  size_t left_to_commit = MIN2(left_until_GC, left_until_max);
1590
1591  return left_to_commit / BytesPerWord;
1592}
1593
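// Worked example (illustrative numbers): with committed_bytes = 30M,
// capacity_until_GC() = 40M and MaxMetaspaceSize = 35M, left_until_GC is
// 10M but left_until_max is only 5M, so allowed_expansion() returns
// 5M / BytesPerWord -- the user-imposed limit wins over the GC threshold.
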
1594void MetaspaceGC::compute_new_size() {
1595  assert(_shrink_factor <= 100, "invalid shrink factor");
1596  uint current_shrink_factor = _shrink_factor;
1597  _shrink_factor = 0;
1598
1599  // Using committed_bytes() for used_after_gc is an overestimation, since the
1600  // chunk free lists are included in committed_bytes() and the memory in an
1601  // un-fragmented chunk free list is available for future allocations.
1602  // However, if the chunk free lists become fragmented, then the memory may
1603  // not be available for future allocations and the memory is therefore "in use".
1604  // Including the chunk free lists in the definition of "in use" is therefore
1605  // necessary. Not including the chunk free lists can cause capacity_until_GC to
1606  // shrink below committed_bytes() and this has caused serious bugs in the past.
1607  const size_t used_after_gc = MetaspaceAux::committed_bytes();
1608  const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
1609
1610  const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
1611  const double maximum_used_percentage = 1.0 - minimum_free_percentage;
1612
1613  const double min_tmp = used_after_gc / maximum_used_percentage;
1614  size_t minimum_desired_capacity =
1615    (size_t)MIN2(min_tmp, double(max_uintx));
1616  // Don't shrink below the initial metaspace size, MetaspaceSize.
1617  minimum_desired_capacity = MAX2(minimum_desired_capacity,
1618                                  MetaspaceSize);
1619
1620  log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: ");
1621  log_trace(gc, metaspace)("    minimum_free_percentage: %6.2f  maximum_used_percentage: %6.2f",
1622                           minimum_free_percentage, maximum_used_percentage);
1623  log_trace(gc, metaspace)("     used_after_gc       : %6.1fKB", used_after_gc / (double) K);
1624
1625
1626  size_t shrink_bytes = 0;
1627  if (capacity_until_GC < minimum_desired_capacity) {
1628    // The capacity below the metaspace HWM is less than desired, so
1629    // increase the HWM.
1630    size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
1631    expand_bytes = align_size_up(expand_bytes, Metaspace::commit_alignment());
1632    // Don't expand unless it's significant
1633    if (expand_bytes >= MinMetaspaceExpansion) {
1634      size_t new_capacity_until_GC = 0;
1635      bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
1636      assert(succeeded, "Should always successfully increment HWM when at safepoint");
1637
1638      Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
1639                                               new_capacity_until_GC,
1640                                               MetaspaceGCThresholdUpdater::ComputeNewSize);
1641      log_trace(gc, metaspace)("    expanding:  minimum_desired_capacity: %6.1fKB  expand_bytes: %6.1fKB  MinMetaspaceExpansion: %6.1fKB  new metaspace HWM:  %6.1fKB",
1642                               minimum_desired_capacity / (double) K,
1643                               expand_bytes / (double) K,
1644                               MinMetaspaceExpansion / (double) K,
1645                               new_capacity_until_GC / (double) K);
1646    }
1647    return;
1648  }
1649
1650  // No expansion, now see if we want to shrink
1651  // We would never want to shrink more than this
1652  assert(capacity_until_GC >= minimum_desired_capacity,
1653         SIZE_FORMAT " >= " SIZE_FORMAT,
1654         capacity_until_GC, minimum_desired_capacity);
1655  size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
1656
1657  // Should shrinking be considered?
1658  if (MaxMetaspaceFreeRatio < 100) {
1659    const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
1660    const double minimum_used_percentage = 1.0 - maximum_free_percentage;
1661    const double max_tmp = used_after_gc / minimum_used_percentage;
1662    size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
1663    maximum_desired_capacity = MAX2(maximum_desired_capacity,
1664                                    MetaspaceSize);
1665    log_trace(gc, metaspace)("    maximum_free_percentage: %6.2f  minimum_used_percentage: %6.2f",
1666                             maximum_free_percentage, minimum_used_percentage);
1667    log_trace(gc, metaspace)("    minimum_desired_capacity: %6.1fKB  maximum_desired_capacity: %6.1fKB",
1668                             minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K);
1669
1670    assert(minimum_desired_capacity <= maximum_desired_capacity,
1671           "sanity check");
1672
1673    if (capacity_until_GC > maximum_desired_capacity) {
1674      // Capacity too large, compute shrinking size
1675      shrink_bytes = capacity_until_GC - maximum_desired_capacity;
1676      // We don't want shrink all the way back to initSize if people call
1677      // System.gc(), because some programs do that between "phases" and then
1678      // we'd just have to grow the heap up again for the next phase.  So we
1679      // damp the shrinking: 0% on the first call, 10% on the second call, 40%
1680      // on the third call, and 100% by the fourth call.  But if we recompute
1681      // size without shrinking, it goes back to 0%.
1682      shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
1683
1684      shrink_bytes = align_size_down(shrink_bytes, Metaspace::commit_alignment());
1685
1686      assert(shrink_bytes <= max_shrink_bytes,
1687             "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
1688             shrink_bytes, max_shrink_bytes);
1689      if (current_shrink_factor == 0) {
1690        _shrink_factor = 10;
1691      } else {
1692        _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
1693      }
1694      log_trace(gc, metaspace)("    shrinking:  initThreshold: %.1fK  maximum_desired_capacity: %.1fK",
1695                               MetaspaceSize / (double) K, maximum_desired_capacity / (double) K);
1696      log_trace(gc, metaspace)("    shrink_bytes: %.1fK  current_shrink_factor: %d  new shrink factor: %d  MinMetaspaceExpansion: %.1fK",
1697                               shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, MinMetaspaceExpansion / (double) K);
1698    }
1699  }
1700
1701  // Don't shrink unless it's significant
1702  if (shrink_bytes >= MinMetaspaceExpansion &&
1703      ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
1704    size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
1705    Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
1706                                             new_capacity_until_GC,
1707                                             MetaspaceGCThresholdUpdater::ComputeNewSize);
1708  }
1709}
1710
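// The damped shrinking above means repeated System.gc() calls against an
// oversized HWM release capacity gradually: _shrink_factor steps through
// 0 -> 10 -> 40 -> 100, so consecutive invocations shrink by 0%, 10%, 40%
// and then 100% of the excess over maximum_desired_capacity, and the
// factor resets to 0 whenever a recomputation does not shrink.
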
1711// Metadebug methods
1712
1713void Metadebug::init_allocation_fail_alot_count() {
1714  if (MetadataAllocationFailALot) {
1715    _allocation_fail_alot_count =
1716      1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0));
1717  }
1718}
1719
1720#ifdef ASSERT
1721bool Metadebug::test_metadata_failure() {
1722  if (MetadataAllocationFailALot &&
1723      Threads::is_vm_complete()) {
1724    if (_allocation_fail_alot_count > 0) {
1725      _allocation_fail_alot_count--;
1726    } else {
1727      log_trace(gc, metaspace, freelist)("Metadata allocation failing for MetadataAllocationFailALot");
1728      init_allocation_fail_alot_count();
1729      return true;
1730    }
1731  }
1732  return false;
1733}
1734#endif
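
// Under ASSERT builds, allocate_work() consults test_metadata_failure() so
// that -XX:+MetadataAllocationFailALot exercises the out-of-metaspace
// paths.  Sketch of the counter behavior (interval is illustrative): with
// MetadataAllocationFailALotInterval = 1000, a pseudo-random count in
// [1, 1000] is drawn; each allocation decrements it, and once it is
// exhausted the next allocation fails and a new count is drawn.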
1735
1736// ChunkManager methods
1737
1738size_t ChunkManager::free_chunks_total_words() {
1739  return _free_chunks_total;
1740}
1741
1742size_t ChunkManager::free_chunks_total_bytes() {
1743  return free_chunks_total_words() * BytesPerWord;
1744}
1745
1746// Update internal accounting after a chunk was added
1747void ChunkManager::account_for_added_chunk(const Metachunk* c) {
1748  assert_lock_strong(SpaceManager::expand_lock());
1749  _free_chunks_count ++;
1750  _free_chunks_total += c->word_size();
1751}
1752
1753// Update internal accounting after a chunk was removed
1754void ChunkManager::account_for_removed_chunk(const Metachunk* c) {
1755  assert_lock_strong(SpaceManager::expand_lock());
1756  assert(_free_chunks_count >= 1,
1757    "ChunkManager::_free_chunks_count: about to go negative (" SIZE_FORMAT ").", _free_chunks_count);
1758  assert(_free_chunks_total >= c->word_size(),
1759    "ChunkManager::_free_chunks_total: about to go negative"
1760     "(now: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ").", _free_chunks_total, c->word_size());
1761  _free_chunks_count --;
1762  _free_chunks_total -= c->word_size();
1763}
1764
1765size_t ChunkManager::free_chunks_count() {
1766#ifdef ASSERT
1767  if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
1768    MutexLockerEx cl(SpaceManager::expand_lock(),
1769                     Mutex::_no_safepoint_check_flag);
1770    // This lock is only needed in debug because the verification
1771    // of the _free_chunks_totals walks the list of free chunks
1772    slow_locked_verify_free_chunks_count();
1773  }
1774#endif
1775  return _free_chunks_count;
1776}
1777
1778ChunkIndex ChunkManager::list_index(size_t size) {
1779  if (size_by_index(SpecializedIndex) == size) {
1780    return SpecializedIndex;
1781  }
1782  if (size_by_index(SmallIndex) == size) {
1783    return SmallIndex;
1784  }
1785  const size_t med_size = size_by_index(MediumIndex);
1786  if (med_size == size) {
1787    return MediumIndex;
1788  }
1789
1790  assert(size > med_size, "Not a humongous chunk");
1791  return HumongousIndex;
1792}
1793
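// Example mapping for the non-class chunk manager with the default sizes
// (SpecializedChunk = 128, SmallChunk = 512, MediumChunk = 8 * K words):
//
//   list_index(128)    -> SpecializedIndex
//   list_index(512)    -> SmallIndex
//   list_index(8 * K)  -> MediumIndex
//   list_index(16 * K) -> HumongousIndex (anything above medium)
//
// Any other size at or below the medium size trips the assert above.
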
1794size_t ChunkManager::size_by_index(ChunkIndex index) {
1795  index_bounds_check(index);
1796  assert(index != HumongousIndex, "Do not call for humongous chunks.");
1797  return free_chunks(index)->size();
1798}
1799
1800void ChunkManager::locked_verify_free_chunks_total() {
1801  assert_lock_strong(SpaceManager::expand_lock());
1802  assert(sum_free_chunks() == _free_chunks_total,
1803         "_free_chunks_total " SIZE_FORMAT " is not the"
1804         " same as sum " SIZE_FORMAT, _free_chunks_total,
1805         sum_free_chunks());
1806}
1807
1808void ChunkManager::verify_free_chunks_total() {
1809  MutexLockerEx cl(SpaceManager::expand_lock(),
1810                     Mutex::_no_safepoint_check_flag);
1811  locked_verify_free_chunks_total();
1812}
1813
1814void ChunkManager::locked_verify_free_chunks_count() {
1815  assert_lock_strong(SpaceManager::expand_lock());
1816  assert(sum_free_chunks_count() == _free_chunks_count,
1817         "_free_chunks_count " SIZE_FORMAT " is not the"
1818         " same as sum " SIZE_FORMAT, _free_chunks_count,
1819         sum_free_chunks_count());
1820}
1821
1822void ChunkManager::verify_free_chunks_count() {
1823#ifdef ASSERT
1824  MutexLockerEx cl(SpaceManager::expand_lock(),
1825                     Mutex::_no_safepoint_check_flag);
1826  locked_verify_free_chunks_count();
1827#endif
1828}
1829
1830void ChunkManager::verify() {
1831  MutexLockerEx cl(SpaceManager::expand_lock(),
1832                     Mutex::_no_safepoint_check_flag);
1833  locked_verify();
1834}
1835
1836void ChunkManager::locked_verify() {
1837  locked_verify_free_chunks_count();
1838  locked_verify_free_chunks_total();
1839}
1840
1841void ChunkManager::locked_print_free_chunks(outputStream* st) {
1842  assert_lock_strong(SpaceManager::expand_lock());
1843  st->print_cr("Free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
1844                _free_chunks_total, _free_chunks_count);
1845}
1846
1847void ChunkManager::locked_print_sum_free_chunks(outputStream* st) {
1848  assert_lock_strong(SpaceManager::expand_lock());
1849  st->print_cr("Sum free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
1850                sum_free_chunks(), sum_free_chunks_count());
1851}
1852
1853ChunkList* ChunkManager::free_chunks(ChunkIndex index) {
1854  assert(index == SpecializedIndex || index == SmallIndex || index == MediumIndex,
1855         "Bad index: %d", (int)index);
1856
1857  return &_free_chunks[index];
1858}
1859
1860// The following methods, which sum the free chunk lists, are used by
1861// printing code that runs in product builds.
1862size_t ChunkManager::sum_free_chunks() {
1863  assert_lock_strong(SpaceManager::expand_lock());
1864  size_t result = 0;
1865  for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1866    ChunkList* list = free_chunks(i);
1867
1868    if (list == NULL) {
1869      continue;
1870    }
1871
1872    result = result + list->count() * list->size();
1873  }
1874  result = result + humongous_dictionary()->total_size();
1875  return result;
1876}
1877
1878size_t ChunkManager::sum_free_chunks_count() {
1879  assert_lock_strong(SpaceManager::expand_lock());
1880  size_t count = 0;
1881  for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1882    ChunkList* list = free_chunks(i);
1883    if (list == NULL) {
1884      continue;
1885    }
1886    count = count + list->count();
1887  }
1888  count = count + humongous_dictionary()->total_free_blocks();
1889  return count;
1890}
1891
1892ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
1893  ChunkIndex index = list_index(word_size);
1894  assert(index < HumongousIndex, "No humongous list");
1895  return free_chunks(index);
1896}
1897
1898Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
1899  assert_lock_strong(SpaceManager::expand_lock());
1900
1901  slow_locked_verify();
1902
1903  Metachunk* chunk = NULL;
1904  if (list_index(word_size) != HumongousIndex) {
1905    ChunkList* free_list = find_free_chunks_list(word_size);
1906    assert(free_list != NULL, "Sanity check");
1907
1908    chunk = free_list->head();
1909
1910    if (chunk == NULL) {
1911      return NULL;
1912    }
1913
1914    // Remove the chunk as the head of the list.
1915    free_list->remove_chunk(chunk);
1916
1917    log_trace(gc, metaspace, freelist)("ChunkManager::free_chunks_get: free_list " PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
1918                                       p2i(free_list), p2i(chunk), chunk->word_size());
1919  } else {
1920    chunk = humongous_dictionary()->get_chunk(
1921      word_size,
1922      FreeBlockDictionary<Metachunk>::atLeast);
1923
1924    if (chunk == NULL) {
1925      return NULL;
1926    }
1927
1928    log_debug(gc, metaspace, alloc)("Free list allocate humongous chunk size " SIZE_FORMAT " for requested size " SIZE_FORMAT " waste " SIZE_FORMAT,
1929                                    chunk->word_size(), word_size, chunk->word_size() - word_size);
1930  }
1931
1932  // Chunk has been removed from the chunk manager; update counters.
1933  account_for_removed_chunk(chunk);
1934
1935  // Remove it from the links to this freelist
1936  chunk->set_next(NULL);
1937  chunk->set_prev(NULL);
1938#ifdef ASSERT
1939  // Chunk is no longer on any freelist. Setting this to false makes
1940  // container_count_slow() work.
1941  chunk->set_is_tagged_free(false);
1942#endif
1943  chunk->container()->inc_container_count();
1944
1945  slow_locked_verify();
1946  return chunk;
1947}
1948
1949Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
1950  assert_lock_strong(SpaceManager::expand_lock());
1951  slow_locked_verify();
1952
1953  // Take from the beginning of the list
1954  Metachunk* chunk = free_chunks_get(word_size);
1955  if (chunk == NULL) {
1956    return NULL;
1957  }
1958
1959  assert((word_size <= chunk->word_size()) ||
1960         (list_index(chunk->word_size()) == HumongousIndex),
1961         "Non-humongous variable sized chunk");
1962  Log(gc, metaspace, freelist) log;
1963  if (log.is_debug()) {
1964    size_t list_count;
1965    if (list_index(word_size) < HumongousIndex) {
1966      ChunkList* list = find_free_chunks_list(word_size);
1967      list_count = list->count();
1968    } else {
1969      list_count = humongous_dictionary()->total_count();
1970    }
1971    log.debug("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk " PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",
1972               p2i(this), p2i(chunk), chunk->word_size(), list_count);
1973    ResourceMark rm;
1974    locked_print_free_chunks(log.debug_stream());
1975  }
1976
1977  return chunk;
1978}
1979
1980void ChunkManager::return_single_chunk(ChunkIndex index, Metachunk* chunk) {
1981  assert_lock_strong(SpaceManager::expand_lock());
1982  assert(chunk != NULL, "Expected chunk.");
1983  assert(chunk->container() != NULL, "Container should have been set.");
1984  assert(chunk->is_tagged_free() == false, "Chunk should be in use.");
1985  index_bounds_check(index);
1986
1987  // Note: mangle *before* returning the chunk to the freelist or dictionary. It does not
1988  // matter for the freelist (non-humongous chunks), but the humongous chunk dictionary
1989  // keeps tree node pointers in the chunk payload area which mangle will overwrite.
1990  NOT_PRODUCT(chunk->mangle(badMetaWordVal);)
1991
1992  if (index != HumongousIndex) {
1993    // Return non-humongous chunk to freelist.
1994    ChunkList* list = free_chunks(index);
1995    assert(list->size() == chunk->word_size(), "Wrong chunk type.");
1996    list->return_chunk_at_head(chunk);
1997    log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " to freelist.",
1998        chunk_size_name(index), p2i(chunk));
1999  } else {
2000    // Return humongous chunk to dictionary.
2001    assert(chunk->word_size() > free_chunks(MediumIndex)->size(), "Wrong chunk type.");
2002    assert(chunk->word_size() % free_chunks(SpecializedIndex)->size() == 0,
2003           "Humongous chunk has wrong alignment.");
2004    _humongous_dictionary.return_chunk(chunk);
2005    log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " (word size " SIZE_FORMAT ") to freelist.",
2006        chunk_size_name(index), p2i(chunk), chunk->word_size());
2007  }
2008  chunk->container()->dec_container_count();
2009  DEBUG_ONLY(chunk->set_is_tagged_free(true);)
2010
2011  // Chunk has been added; update counters.
2012  account_for_added_chunk(chunk);
2013
2014}
2015
2016void ChunkManager::return_chunk_list(ChunkIndex index, Metachunk* chunks) {
2017  index_bounds_check(index);
2018  if (chunks == NULL) {
2019    return;
2020  }
2021  LogTarget(Trace, gc, metaspace, freelist) log;
2022  if (log.is_enabled()) { // tracing
2023    log.print("returning list of %s chunks...", chunk_size_name(index));
2024  }
2025  unsigned num_chunks_returned = 0;
2026  size_t size_chunks_returned = 0;
2027  Metachunk* cur = chunks;
2028  while (cur != NULL) {
2029    // Capture the next link before it is changed
2030    // by the call to return_chunk_at_head();
2031    Metachunk* next = cur->next();
2032    if (log.is_enabled()) { // tracing
2033      num_chunks_returned ++;
2034      size_chunks_returned += cur->word_size();
2035    }
2036    return_single_chunk(index, cur);
2037    cur = next;
2038  }
2039  if (log.is_enabled()) { // tracing
2040    log.print("returned %u %s chunks to freelist, total word size " SIZE_FORMAT ".",
2041        num_chunks_returned, chunk_size_name(index), size_chunks_returned);
2042    if (index != HumongousIndex) {
2043      log.print("updated freelist count: " SIZE_FORMAT ".", free_chunks(index)->size());
2044    } else {
2045      log.print("updated dictionary count " SIZE_FORMAT ".", _humongous_dictionary.total_count());
2046    }
2047  }
2048}
2049
2050void ChunkManager::print_on(outputStream* out) const {
2051  const_cast<ChunkManager *>(this)->humongous_dictionary()->report_statistics(out);
2052}
2053
2054// SpaceManager methods
2055
2056size_t SpaceManager::adjust_initial_chunk_size(size_t requested, bool is_class_space) {
2057  size_t chunk_sizes[] = {
2058      specialized_chunk_size(is_class_space),
2059      small_chunk_size(is_class_space),
2060      medium_chunk_size(is_class_space)
2061  };
2062
2063  // Adjust up to one of the fixed chunk sizes ...
2064  for (size_t i = 0; i < ARRAY_SIZE(chunk_sizes); i++) {
2065    if (requested <= chunk_sizes[i]) {
2066      return chunk_sizes[i];
2067    }
2068  }
2069
2070  // ... or return the size as a humongous chunk.
2071  return requested;
2072}
2073
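// Example (non-class space, default chunk sizes): a requested size of
// 300 words is adjusted up to SmallChunk (512), 4000 words up to
// MediumChunk (8 * K), and 20000 words is returned unchanged and will be
// treated as a humongous chunk.
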
2074size_t SpaceManager::adjust_initial_chunk_size(size_t requested) const {
2075  return adjust_initial_chunk_size(requested, is_class());
2076}
2077
2078size_t SpaceManager::get_initial_chunk_size(Metaspace::MetaspaceType type) const {
2079  size_t requested;
2080
2081  if (is_class()) {
2082    switch (type) {
2083    case Metaspace::BootMetaspaceType:       requested = Metaspace::first_class_chunk_word_size(); break;
2084    case Metaspace::ROMetaspaceType:         requested = ClassSpecializedChunk; break;
2085    case Metaspace::ReadWriteMetaspaceType:  requested = ClassSpecializedChunk; break;
2086    case Metaspace::AnonymousMetaspaceType:  requested = ClassSpecializedChunk; break;
2087    case Metaspace::ReflectionMetaspaceType: requested = ClassSpecializedChunk; break;
2088    default:                                 requested = ClassSmallChunk; break;
2089    }
2090  } else {
2091    switch (type) {
2092    case Metaspace::BootMetaspaceType:       requested = Metaspace::first_chunk_word_size(); break;
2093    case Metaspace::ROMetaspaceType:         requested = SharedReadOnlySize / wordSize; break;
2094    case Metaspace::ReadWriteMetaspaceType:  requested = SharedReadWriteSize / wordSize; break;
2095    case Metaspace::AnonymousMetaspaceType:  requested = SpecializedChunk; break;
2096    case Metaspace::ReflectionMetaspaceType: requested = SpecializedChunk; break;
2097    default:                                 requested = SmallChunk; break;
2098    }
2099  }
2100
2101  // Adjust to one of the fixed chunk sizes (unless humongous)
2102  const size_t adjusted = adjust_initial_chunk_size(requested);
2103
2104  assert(adjusted != 0, "Incorrect initial chunk size. Requested: "
2105         SIZE_FORMAT " adjusted: " SIZE_FORMAT, requested, adjusted);
2106
2107  return adjusted;
2108}
2109
2110size_t SpaceManager::sum_free_in_chunks_in_use() const {
2111  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2112  size_t free = 0;
2113  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2114    Metachunk* chunk = chunks_in_use(i);
2115    while (chunk != NULL) {
2116      free += chunk->free_word_size();
2117      chunk = chunk->next();
2118    }
2119  }
2120  return free;
2121}
2122
2123size_t SpaceManager::sum_waste_in_chunks_in_use() const {
2124  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2125  size_t result = 0;
2126  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2127   result += sum_waste_in_chunks_in_use(i);
2128  }
2129
2130  return result;
2131}
2132
2133size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
2134  size_t result = 0;
2135  Metachunk* chunk = chunks_in_use(index);
2136  // Count the free space in all the chunks but not the
2137  // current chunk from which allocations are still being done.
2138  while (chunk != NULL) {
2139    if (chunk != current_chunk()) {
2140      result += chunk->free_word_size();
2141    }
2142    chunk = chunk->next();
2143  }
2144  return result;
2145}
2146
2147size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
2148  // For CMS use allocated_chunks_words(), which does not need the
2149  // Metaspace lock.  For the other collectors, sum over the in-use
2150  // chunk lists.  Both methods are kept as a cross-check:
2151  // sum_capacity_in_chunks_in_use() is too expensive to use in
2152  // product builds, so allocated_chunks_words() should be used there,
2153  // but keeping the summation allows checking that it returns the
2154  // same value as sum_capacity_in_chunks_in_use(), which is the
2155  // definitive answer.
2156  if (UseConcMarkSweepGC) {
2157    return allocated_chunks_words();
2158  } else {
2159    MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2160    size_t sum = 0;
2161    for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2162      Metachunk* chunk = chunks_in_use(i);
2163      while (chunk != NULL) {
2164        sum += chunk->word_size();
2165        chunk = chunk->next();
2166      }
2167    }
2168    return sum;
2169  }
2170}
2171
2172size_t SpaceManager::sum_count_in_chunks_in_use() {
2173  size_t count = 0;
2174  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2175    count = count + sum_count_in_chunks_in_use(i);
2176  }
2177
2178  return count;
2179}
2180
2181size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
2182  size_t count = 0;
2183  Metachunk* chunk = chunks_in_use(i);
2184  while (chunk != NULL) {
2185    count++;
2186    chunk = chunk->next();
2187  }
2188  return count;
2189}
2190
2191
2192size_t SpaceManager::sum_used_in_chunks_in_use() const {
2193  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2194  size_t used = 0;
2195  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2196    Metachunk* chunk = chunks_in_use(i);
2197    while (chunk != NULL) {
2198      used += chunk->used_word_size();
2199      chunk = chunk->next();
2200    }
2201  }
2202  return used;
2203}
2204
2205void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
2206
2207  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2208    Metachunk* chunk = chunks_in_use(i);
2209    st->print("SpaceManager: %s " PTR_FORMAT,
2210                 chunk_size_name(i), p2i(chunk));
2211    if (chunk != NULL) {
2212      st->print_cr(" free " SIZE_FORMAT,
2213                   chunk->free_word_size());
2214    } else {
2215      st->cr();
2216    }
2217  }
2218
2219  chunk_manager()->locked_print_free_chunks(st);
2220  chunk_manager()->locked_print_sum_free_chunks(st);
2221}
2222
2223size_t SpaceManager::calc_chunk_size(size_t word_size) {
2224
2225  // Decide between a small chunk and a medium chunk.  Up to
2226  // _small_chunk_limit small chunks can be allocated.
2227  // After that a medium chunk is preferred.
2228  size_t chunk_word_size;
2229  if (chunks_in_use(MediumIndex) == NULL &&
2230      sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) {
2231    chunk_word_size = (size_t) small_chunk_size();
2232    if (word_size + Metachunk::overhead() > small_chunk_size()) {
2233      chunk_word_size = medium_chunk_size();
2234    }
2235  } else {
2236    chunk_word_size = medium_chunk_size();
2237  }
2238
2239  // Might still need a humongous chunk.  Enforce
2240  // humongous allocation sizes to be aligned up to
2241  // the smallest chunk size.
2242  size_t if_humongous_sized_chunk =
2243    align_size_up(word_size + Metachunk::overhead(),
2244                  smallest_chunk_size());
2245  chunk_word_size =
2246    MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
2247
2248  assert(!SpaceManager::is_humongous(word_size) ||
2249         chunk_word_size == if_humongous_sized_chunk,
2250         "Size calculation is wrong, word_size " SIZE_FORMAT
2251         " chunk_word_size " SIZE_FORMAT,
2252         word_size, chunk_word_size);
2253  Log(gc, metaspace, alloc) log;
2254  if (log.is_debug() && SpaceManager::is_humongous(word_size)) {
2255    log.debug("Metadata humongous allocation:");
2256    log.debug("  word_size " PTR_FORMAT, word_size);
2257    log.debug("  chunk_word_size " PTR_FORMAT, chunk_word_size);
2258    log.debug("    chunk overhead " PTR_FORMAT, Metachunk::overhead());
2259  }
2260  return chunk_word_size;
2261}
2262
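// A sketch of the sizing policy above (non-class defaults): while no
// medium chunk is in use and fewer than _small_chunk_limit small chunks
// have been allocated, a request that fits in a small chunk (512 words
// minus overhead) gets a small chunk; otherwise a medium chunk (8 * K).
// A request too large for that is aligned up to the smallest chunk size
// (SpecializedChunk, 128 words) and satisfied with a humongous chunk.
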
2263void SpaceManager::track_metaspace_memory_usage() {
2264  if (is_init_completed()) {
2265    if (is_class()) {
2266      MemoryService::track_compressed_class_memory_usage();
2267    }
2268    MemoryService::track_metaspace_memory_usage();
2269  }
2270}
2271
2272MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
2273  assert(vs_list()->current_virtual_space() != NULL,
2274         "Should have been set");
2275  assert(current_chunk() == NULL ||
2276         current_chunk()->allocate(word_size) == NULL,
2277         "Don't need to expand");
2278  MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
2279
2280  if (log_is_enabled(Trace, gc, metaspace, freelist)) {
2281    size_t words_left = 0;
2282    size_t words_used = 0;
2283    if (current_chunk() != NULL) {
2284      words_left = current_chunk()->free_word_size();
2285      words_used = current_chunk()->used_word_size();
2286    }
2287    log_trace(gc, metaspace, freelist)("SpaceManager::grow_and_allocate for " SIZE_FORMAT " words " SIZE_FORMAT " words used " SIZE_FORMAT " words left",
2288                                       word_size, words_used, words_left);
2289  }
2290
2291  // Get another chunk
2292  size_t chunk_word_size = calc_chunk_size(word_size);
2293  Metachunk* next = get_new_chunk(chunk_word_size);
2294
2295  MetaWord* mem = NULL;
2296
2297  // If a chunk was available, add it to the in-use chunk list
2298  // and do an allocation from it.
2299  if (next != NULL) {
2300    // Add to this manager's list of chunks in use.
2301    add_chunk(next, false);
2302    mem = next->allocate(word_size);
2303  }
2304
2305  // Track metaspace memory usage statistic.
2306  track_metaspace_memory_usage();
2307
2308  return mem;
2309}
2310
2311void SpaceManager::print_on(outputStream* st) const {
2312
2313  for (ChunkIndex i = ZeroIndex;
2314       i < NumberOfInUseLists;
2315       i = next_chunk_index(i)) {
2316    st->print_cr("  chunks_in_use " PTR_FORMAT " chunk size " SIZE_FORMAT,
2317                 p2i(chunks_in_use(i)),
2318                 chunks_in_use(i) == NULL ? 0 : chunks_in_use(i)->word_size());
2319  }
2320  st->print_cr("    waste:  Small " SIZE_FORMAT " Medium " SIZE_FORMAT
2321               " Humongous " SIZE_FORMAT,
2322               sum_waste_in_chunks_in_use(SmallIndex),
2323               sum_waste_in_chunks_in_use(MediumIndex),
2324               sum_waste_in_chunks_in_use(HumongousIndex));
2325  // block free lists
2326  if (block_freelists() != NULL) {
2327    st->print_cr("total in block free lists " SIZE_FORMAT,
2328      block_freelists()->total_size());
2329  }
2330}
2331
2332SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
2333                           Mutex* lock) :
2334  _mdtype(mdtype),
2335  _allocated_blocks_words(0),
2336  _allocated_chunks_words(0),
2337  _allocated_chunks_count(0),
2338  _block_freelists(NULL),
2339  _lock(lock)
2340{
2341  initialize();
2342}
2343
2344void SpaceManager::inc_size_metrics(size_t words) {
2345  assert_lock_strong(SpaceManager::expand_lock());
2346  // Total of allocated Metachunks and allocated Metachunks count
2347  // for each SpaceManager
2348  _allocated_chunks_words = _allocated_chunks_words + words;
2349  _allocated_chunks_count++;
2350  // Global total of capacity in allocated Metachunks
2351  MetaspaceAux::inc_capacity(mdtype(), words);
2352  // Global total of allocated Metablocks.
2353  // used_words_slow() includes the overhead in each
2354  // Metachunk, so count the overhead as used when the
2355  // Metachunk is first added (it is only counted once per
2356  // Metachunk).
2357  MetaspaceAux::inc_used(mdtype(), Metachunk::overhead());
2358}
2359
2360void SpaceManager::inc_used_metrics(size_t words) {
2361  // Add to the per SpaceManager total
2362  Atomic::add_ptr(words, &_allocated_blocks_words);
2363  // Add to the global total
2364  MetaspaceAux::inc_used(mdtype(), words);
2365}
2366
2367void SpaceManager::dec_total_from_size_metrics() {
2368  MetaspaceAux::dec_capacity(mdtype(), allocated_chunks_words());
2369  MetaspaceAux::dec_used(mdtype(), allocated_blocks_words());
2370  // Also deduct the overhead per Metachunk
2371  MetaspaceAux::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead());
2372}
2373
2374void SpaceManager::initialize() {
2375  Metadebug::init_allocation_fail_alot_count();
2376  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2377    _chunks_in_use[i] = NULL;
2378  }
2379  _current_chunk = NULL;
2380  log_trace(gc, metaspace, freelist)("SpaceManager(): " PTR_FORMAT, p2i(this));
2381}
2382
2383SpaceManager::~SpaceManager() {
2384  // This takes this->_lock, which can't be done while holding expand_lock().
2385  assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
2386         "sum_capacity_in_chunks_in_use() " SIZE_FORMAT
2387         " allocated_chunks_words() " SIZE_FORMAT,
2388         sum_capacity_in_chunks_in_use(), allocated_chunks_words());
2389
2390  MutexLockerEx fcl(SpaceManager::expand_lock(),
2391                    Mutex::_no_safepoint_check_flag);
2392
2393  chunk_manager()->slow_locked_verify();
2394
2395  dec_total_from_size_metrics();
2396
2397  Log(gc, metaspace, freelist) log;
2398  if (log.is_trace()) {
2399    log.trace("~SpaceManager(): " PTR_FORMAT, p2i(this));
2400    ResourceMark rm;
2401    locked_print_chunks_in_use_on(log.trace_stream());
2402    if (block_freelists() != NULL) {
2403      block_freelists()->print_on(log.trace_stream());
2404    }
2405  }
2406
2407  // Add all the chunks in use by this space manager
2408  // to the global list of free chunks.
2409
2410  // Follow each list of chunks-in-use and add them to the
2411  // free lists.  Each list is NULL terminated.
2412
2413  for (ChunkIndex i = ZeroIndex; i <= HumongousIndex; i = next_chunk_index(i)) {
2414    Metachunk* chunks = chunks_in_use(i);
2415    chunk_manager()->return_chunk_list(i, chunks);
2416    set_chunks_in_use(i, NULL);
2417  }
2418
2419  chunk_manager()->slow_locked_verify();
2420
2421  if (_block_freelists != NULL) {
2422    delete _block_freelists;
2423  }
2424}
2425
2426void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
2427  assert_lock_strong(_lock);
2428  // Allocations and deallocations are in raw_word_size
2429  size_t raw_word_size = get_allocation_word_size(word_size);
2430  // Lazily create a block_freelist
2431  if (block_freelists() == NULL) {
2432    _block_freelists = new BlockFreelist();
2433  }
2434  block_freelists()->return_block(p, raw_word_size);
2435}
2436
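// Deallocated blocks are recycled through the lazily-created
// BlockFreelist: a later allocate() can be served from it once the
// freelist has grown past allocation_from_dictionary_limit (4 * K words).
// Roundtrip sketch (hypothetical sizes):
//
//   sm->deallocate(p, 64);          // park a 64-word block in the freelist
//   ...
//   MetaWord* q = sm->allocate(64); // may return the recycled block
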
2437// Adds a chunk to the list of chunks in use.
2438void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
2439
2440  assert(new_chunk != NULL, "Should not be NULL");
2441  assert(new_chunk->next() == NULL, "Should not be on a list");
2442
2443  new_chunk->reset_empty();
2444
2445  // Find the correct list and set the current
2446  // chunk for that list.
2447  ChunkIndex index = chunk_manager()->list_index(new_chunk->word_size());
2448
2449  if (index != HumongousIndex) {
2450    retire_current_chunk();
2451    set_current_chunk(new_chunk);
2452    new_chunk->set_next(chunks_in_use(index));
2453    set_chunks_in_use(index, new_chunk);
2454  } else {
2455    // For null class loader data and DumpSharedSpaces, the first chunk isn't
2456    // small, so the small chunk list will be empty.  Link this first chunk as the current
2457    // chunk.
2458    if (make_current) {
2459      // Set as the current chunk but otherwise treat as a humongous chunk.
2460      set_current_chunk(new_chunk);
2461    }
2462    // Link at head.  The _current_chunk only points to a humongous chunk
2463    // for the null class loader metaspace (class and data virtual space
2464    // managers), and since new humongous chunks are linked at the head,
2465    // it never points to the tail of the humongous chunks list.
2466    new_chunk->set_next(chunks_in_use(HumongousIndex));
2467    set_chunks_in_use(HumongousIndex, new_chunk);
2468
2469    assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
2470  }
2471
2472  // Add to the running sum of capacity
2473  inc_size_metrics(new_chunk->word_size());
2474
2475  assert(new_chunk->is_empty(), "Not ready for reuse");
2476  Log(gc, metaspace, freelist) log;
2477  if (log.is_trace()) {
2478    log.trace("SpaceManager::add_chunk: " SIZE_FORMAT ") ", sum_count_in_chunks_in_use());
2479    ResourceMark rm;
2480    outputStream* out = log.trace_stream();
2481    new_chunk->print_on(out);
2482    chunk_manager()->locked_print_free_chunks(out);
2483  }
2484}
2485
2486void SpaceManager::retire_current_chunk() {
2487  if (current_chunk() != NULL) {
2488    size_t remaining_words = current_chunk()->free_word_size();
2489    if (remaining_words >= BlockFreelist::min_dictionary_size()) {
2490      MetaWord* ptr = current_chunk()->allocate(remaining_words);
2491      deallocate(ptr, remaining_words);
2492      inc_used_metrics(remaining_words);
2493    }
2494  }
2495}
2496
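// retire_current_chunk() keeps the unusable tail of a retired chunk from
// being lost: the tail is allocated out of the chunk and immediately
// deallocated onto the block freelist.  Example (illustrative sizes): with
// 40 free words left and a min_dictionary_size() of 32 words, the 40-word
// tail becomes a freelist block that a later small request can reuse.
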
2497Metachunk* SpaceManager::get_new_chunk(size_t chunk_word_size) {
2498  // Get a chunk from the chunk freelist
2499  Metachunk* next = chunk_manager()->chunk_freelist_allocate(chunk_word_size);
2500
2501  if (next == NULL) {
2502    next = vs_list()->get_new_chunk(chunk_word_size,
2503                                    medium_chunk_bunch());
2504  }
2505
2506  Log(gc, metaspace, alloc) log;
2507  if (log.is_debug() && next != NULL &&
2508      SpaceManager::is_humongous(next->word_size())) {
2509    log.debug("  new humongous chunk word size " PTR_FORMAT, next->word_size());
2510  }
2511
2512  return next;
2513}
2514
2515/*
2516 * The policy is to allocate up to _small_chunk_limit small chunks
2517 * after which only medium chunks are allocated.  This is done to
2518 * reduce fragmentation.  In some cases, this can result in a lot
2519 * of small chunks being allocated to the point where it's not
2520 * possible to expand.  If this happens, there may be no medium chunks
2521 * available and OOME would be thrown.  Instead of doing that,
2522 * if the allocation request size fits in a small chunk, an attempt
2523 * will be made to allocate a small chunk.
2524 */
2525MetaWord* SpaceManager::get_small_chunk_and_allocate(size_t word_size) {
2526  size_t raw_word_size = get_allocation_word_size(word_size);
2527
2528  if (raw_word_size + Metachunk::overhead() > small_chunk_size()) {
2529    return NULL;
2530  }
2531
2532  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2533  MutexLockerEx cl1(expand_lock(), Mutex::_no_safepoint_check_flag);
2534
2535  Metachunk* chunk = chunk_manager()->chunk_freelist_allocate(small_chunk_size());
2536
2537  MetaWord* mem = NULL;
2538
2539  if (chunk != NULL) {
2540    // Add chunk to the in-use chunk list and do an allocation from it.
2541    // Add to this manager's list of chunks in use.
2542    add_chunk(chunk, false);
2543    mem = chunk->allocate(raw_word_size);
2544
2545    inc_used_metrics(raw_word_size);
2546
2547    // Track metaspace memory usage statistic.
2548    track_metaspace_memory_usage();
2549  }
2550
2551  return mem;
2552}
2553
2554MetaWord* SpaceManager::allocate(size_t word_size) {
2555  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2556  size_t raw_word_size = get_allocation_word_size(word_size);
2557  BlockFreelist* fl =  block_freelists();
2558  MetaWord* p = NULL;
2559  // Allocation from the dictionary is expensive in the sense that
2560  // the dictionary has to be searched for a size.  Don't allocate
2561  // from the dictionary until it starts to get fat.  Is this
2562  // a reasonable policy?  Maybe a skinny dictionary is fast enough
2563  // for allocations.  Do some profiling.  JJJ
2564  if (fl != NULL && fl->total_size() > allocation_from_dictionary_limit) {
2565    p = fl->get_block(raw_word_size);
2566  }
2567  if (p == NULL) {
2568    p = allocate_work(raw_word_size);
2569  }
2570
2571  return p;
2572}
2573
2574// Returns the address of space allocated for "word_size".
2575// This method does not know about blocks (Metablocks).
2576MetaWord* SpaceManager::allocate_work(size_t word_size) {
2577  assert_lock_strong(_lock);
2578#ifdef ASSERT
2579  if (Metadebug::test_metadata_failure()) {
2580    return NULL;
2581  }
2582#endif
2583  // Is there space in the current chunk?
2584  MetaWord* result = NULL;
2585
2586  // For DumpSharedSpaces, only allocate out of the current chunk which is
2587  // never null because we gave it the size we wanted.  Caller reports out
2588  // of memory if this returns null.
2589  if (DumpSharedSpaces) {
2590    assert(current_chunk() != NULL, "should never happen");
2591    inc_used_metrics(word_size);
2592    return current_chunk()->allocate(word_size); // caller handles null result
2593  }
2594
2595  if (current_chunk() != NULL) {
2596    result = current_chunk()->allocate(word_size);
2597  }
2598
2599  if (result == NULL) {
2600    result = grow_and_allocate(word_size);
2601  }
2602
2603  if (result != NULL) {
2604    inc_used_metrics(word_size);
2605    assert(result != (MetaWord*) chunks_in_use(MediumIndex),
2606           "Head of the list is being allocated");
2607  }
2608
2609  return result;
2610}
2611
2612void SpaceManager::verify() {
2613  // If there are blocks in the dictionary, then
2614  // verification of chunks does not work since
2615  // being in the dictionary alters a chunk.
2616  if (block_freelists() != NULL && block_freelists()->total_size() == 0) {
2617    for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2618      Metachunk* curr = chunks_in_use(i);
2619      while (curr != NULL) {
2620        curr->verify();
2621        verify_chunk_size(curr);
2622        curr = curr->next();
2623      }
2624    }
2625  }
2626}
2627
2628void SpaceManager::verify_chunk_size(Metachunk* chunk) {
2629  assert(is_humongous(chunk->word_size()) ||
2630         chunk->word_size() == medium_chunk_size() ||
2631         chunk->word_size() == small_chunk_size() ||
2632         chunk->word_size() == specialized_chunk_size(),
2633         "Chunk size is wrong");
2634  return;
2635}
2636
2637#ifdef ASSERT
2638void SpaceManager::verify_allocated_blocks_words() {
2639  // Verification is only guaranteed at a safepoint.
2640  assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(),
2641    "Verification can fail if the applications is running");
2642  assert(allocated_blocks_words() == sum_used_in_chunks_in_use(),
2643         "allocation total is not consistent " SIZE_FORMAT
2644         " vs " SIZE_FORMAT,
2645         allocated_blocks_words(), sum_used_in_chunks_in_use());
2646}
2647
2648#endif
2649
2650void SpaceManager::dump(outputStream* const out) const {
2651  size_t curr_total = 0;
2652  size_t waste = 0;
2653  uint i = 0;
2654  size_t used = 0;
2655  size_t capacity = 0;
2656
2657  // Add up statistics for all chunks in this SpaceManager.
2658  for (ChunkIndex index = ZeroIndex;
2659       index < NumberOfInUseLists;
2660       index = next_chunk_index(index)) {
2661    for (Metachunk* curr = chunks_in_use(index);
2662         curr != NULL;
2663         curr = curr->next()) {
2664      out->print("%d) ", i++);
2665      curr->print_on(out);
2666      curr_total += curr->word_size();
2667      used += curr->used_word_size();
2668      capacity += curr->word_size();
2669      waste += curr->free_word_size() + curr->overhead();
2670    }
2671  }
2672
2673  if (log_is_enabled(Trace, gc, metaspace, freelist)) {
2674    if (block_freelists() != NULL) block_freelists()->print_on(out);
2675  }
2676
2677  size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
2678  // Free space isn't wasted.
2679  waste -= free;
2680
2681  out->print_cr("total of all chunks "  SIZE_FORMAT " used " SIZE_FORMAT
2682                " free " SIZE_FORMAT " capacity " SIZE_FORMAT
2683                " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste);
2684}
2685
2686// MetaspaceAux
2687
2688
2689size_t MetaspaceAux::_capacity_words[] = {0, 0};
2690size_t MetaspaceAux::_used_words[] = {0, 0};
2691
2692size_t MetaspaceAux::free_bytes(Metaspace::MetadataType mdtype) {
2693  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2694  return list == NULL ? 0 : list->free_bytes();
2695}
2696
2697size_t MetaspaceAux::free_bytes() {
2698  return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType);
2699}
2700
2701void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
2702  assert_lock_strong(SpaceManager::expand_lock());
2703  assert(words <= capacity_words(mdtype),
2704         "About to decrement below 0: words " SIZE_FORMAT
2705         " is greater than _capacity_words[%u] " SIZE_FORMAT,
2706         words, mdtype, capacity_words(mdtype));
2707  _capacity_words[mdtype] -= words;
2708}
2709
2710void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
2711  assert_lock_strong(SpaceManager::expand_lock());
2712  // Needs to be atomic
2713  _capacity_words[mdtype] += words;
2714}
2715
2716void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) {
2717  assert(words <= used_words(mdtype),
2718         "About to decrement below 0: words " SIZE_FORMAT
2719         " is greater than _used_words[%u] " SIZE_FORMAT,
2720         words, mdtype, used_words(mdtype));
2721  // For CMS deallocation of the Metaspaces occurs during the
2722  // sweep which is a concurrent phase.  Protection by the expand_lock()
2723  // is not enough since allocation is on a per Metaspace basis
2724  // and protected by the Metaspace lock.
2725  jlong minus_words = -(jlong)words;
2726  Atomic::add_ptr(minus_words, &_used_words[mdtype]);
2727}
2728
2729void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) {
2730  // _used_words tracks allocations for
2731  // each piece of metadata.  Those allocations are
2732  // generally done concurrently by different application
2733  // threads so must be done atomically.
2734  Atomic::add_ptr(words, &_used_words[mdtype]);
2735}
2736
2737size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) {
2738  size_t used = 0;
2739  ClassLoaderDataGraphMetaspaceIterator iter;
2740  while (iter.repeat()) {
2741    Metaspace* msp = iter.get_next();
2742    // Sum allocated_blocks_words for each metaspace
2743    if (msp != NULL) {
2744      used += msp->used_words_slow(mdtype);
2745    }
2746  }
2747  return used * BytesPerWord;
2748}
2749
2750size_t MetaspaceAux::free_bytes_slow(Metaspace::MetadataType mdtype) {
2751  size_t free = 0;
2752  ClassLoaderDataGraphMetaspaceIterator iter;
2753  while (iter.repeat()) {
2754    Metaspace* msp = iter.get_next();
2755    if (msp != NULL) {
2756      free += msp->free_words_slow(mdtype);
2757    }
2758  }
2759  return free * BytesPerWord;
2760}
2761
2762size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
2763  if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) {
2764    return 0;
2765  }
2766  // Don't count the space in the freelists.  That space will be
2767  // added to the capacity calculation as needed.
2768  size_t capacity = 0;
2769  ClassLoaderDataGraphMetaspaceIterator iter;
2770  while (iter.repeat()) {
2771    Metaspace* msp = iter.get_next();
2772    if (msp != NULL) {
2773      capacity += msp->capacity_words_slow(mdtype);
2774    }
2775  }
2776  return capacity * BytesPerWord;
2777}
2778
2779size_t MetaspaceAux::capacity_bytes_slow() {
2780#ifdef PRODUCT
2781  // Use capacity_bytes() in PRODUCT instead of this function.
2782  guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
2783#endif
2784  size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
2785  size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
2786  assert(capacity_bytes() == class_capacity + non_class_capacity,
2787         "bad accounting: capacity_bytes() " SIZE_FORMAT
2788         " class_capacity + non_class_capacity " SIZE_FORMAT
2789         " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
2790         capacity_bytes(), class_capacity + non_class_capacity,
2791         class_capacity, non_class_capacity);
2792
2793  return class_capacity + non_class_capacity;
2794}
2795
2796size_t MetaspaceAux::reserved_bytes(Metaspace::MetadataType mdtype) {
2797  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2798  return list == NULL ? 0 : list->reserved_bytes();
2799}
2800
2801size_t MetaspaceAux::committed_bytes(Metaspace::MetadataType mdtype) {
2802  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2803  return list == NULL ? 0 : list->committed_bytes();
2804}
2805
2806size_t MetaspaceAux::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
2807
2808size_t MetaspaceAux::free_chunks_total_words(Metaspace::MetadataType mdtype) {
2809  ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
2810  if (chunk_manager == NULL) {
2811    return 0;
2812  }
2813  chunk_manager->slow_verify();
2814  return chunk_manager->free_chunks_total_words();
2815}
2816
2817size_t MetaspaceAux::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
2818  return free_chunks_total_words(mdtype) * BytesPerWord;
2819}
2820
2821size_t MetaspaceAux::free_chunks_total_words() {
2822  return free_chunks_total_words(Metaspace::ClassType) +
2823         free_chunks_total_words(Metaspace::NonClassType);
2824}
2825
2826size_t MetaspaceAux::free_chunks_total_bytes() {
2827  return free_chunks_total_words() * BytesPerWord;
2828}
2829
2830bool MetaspaceAux::has_chunk_free_list(Metaspace::MetadataType mdtype) {
2831  return Metaspace::get_chunk_manager(mdtype) != NULL;
2832}
2833
2834MetaspaceChunkFreeListSummary MetaspaceAux::chunk_free_list_summary(Metaspace::MetadataType mdtype) {
2835  if (!has_chunk_free_list(mdtype)) {
2836    return MetaspaceChunkFreeListSummary();
2837  }
2838
2839  const ChunkManager* cm = Metaspace::get_chunk_manager(mdtype);
2840  return cm->chunk_free_list_summary();
2841}
2842
2843void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
2844  log_info(gc, metaspace)("Metaspace: "  SIZE_FORMAT "K->" SIZE_FORMAT "K("  SIZE_FORMAT "K)",
2845                          prev_metadata_used/K, used_bytes()/K, reserved_bytes()/K);
2846}
2847
2848void MetaspaceAux::print_on(outputStream* out) {
2849  Metaspace::MetadataType nct = Metaspace::NonClassType;
2850
2851  out->print_cr(" Metaspace       "
2852                "used "      SIZE_FORMAT "K, "
2853                "capacity "  SIZE_FORMAT "K, "
2854                "committed " SIZE_FORMAT "K, "
2855                "reserved "  SIZE_FORMAT "K",
2856                used_bytes()/K,
2857                capacity_bytes()/K,
2858                committed_bytes()/K,
2859                reserved_bytes()/K);
2860
2861  if (Metaspace::using_class_space()) {
2862    Metaspace::MetadataType ct = Metaspace::ClassType;
2863    out->print_cr("  class space    "
2864                  "used "      SIZE_FORMAT "K, "
2865                  "capacity "  SIZE_FORMAT "K, "
2866                  "committed " SIZE_FORMAT "K, "
2867                  "reserved "  SIZE_FORMAT "K",
2868                  used_bytes(ct)/K,
2869                  capacity_bytes(ct)/K,
2870                  committed_bytes(ct)/K,
2871                  reserved_bytes(ct)/K);
2872  }
2873}
2874
2875// Print information for class space and data space separately.
2876// This is almost the same as above.
2877void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
2878  size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype);
2879  size_t capacity_bytes = capacity_bytes_slow(mdtype);
2880  size_t used_bytes = used_bytes_slow(mdtype);
2881  size_t free_bytes = free_bytes_slow(mdtype);
2882  size_t used_and_free = used_bytes + free_bytes +
2883                           free_chunks_capacity_bytes;
2884  out->print_cr("  Chunk accounting: used in chunks " SIZE_FORMAT
2885             "K + unused in chunks " SIZE_FORMAT "K  + "
2886             " capacity in free chunks " SIZE_FORMAT "K = " SIZE_FORMAT
2887             "K  capacity in allocated chunks " SIZE_FORMAT "K",
2888             used_bytes / K,
2889             free_bytes / K,
2890             free_chunks_capacity_bytes / K,
2891             used_and_free / K,
2892             capacity_bytes / K);
2893  // Accounting can only be correct if we got the values during a safepoint
2894  assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong");
2895}
2896
2897// Print total fragmentation for class metaspaces
2898void MetaspaceAux::print_class_waste(outputStream* out) {
2899  assert(Metaspace::using_class_space(), "class metaspace not used");
2900  size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0;
2901  size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0;
2902  ClassLoaderDataGraphMetaspaceIterator iter;
2903  while (iter.repeat()) {
2904    Metaspace* msp = iter.get_next();
2905    if (msp != NULL) {
2906      cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
2907      cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
2908      cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
2909      cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
2910      cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
2911      cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
2912      cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
2913    }
2914  }
2915  out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
2916                SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
2917                SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
2918                "large count " SIZE_FORMAT,
2919                cls_specialized_count, cls_specialized_waste,
2920                cls_small_count, cls_small_waste,
2921                cls_medium_count, cls_medium_waste, cls_humongous_count);
2922}
2923
2924// Print total fragmentation for data and class metaspaces separately
2925void MetaspaceAux::print_waste(outputStream* out) {
2926  size_t specialized_waste = 0, small_waste = 0, medium_waste = 0;
2927  size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0;
2928
2929  ClassLoaderDataGraphMetaspaceIterator iter;
2930  while (iter.repeat()) {
2931    Metaspace* msp = iter.get_next();
2932    if (msp != NULL) {
2933      specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
2934      specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
2935      small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex);
2936      small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex);
2937      medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex);
2938      medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex);
2939      humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex);
2940    }
2941  }
2942  out->print_cr("Total fragmentation waste (words) doesn't count free space");
2943  out->print_cr("  data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
2944                        SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
2945                        SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
2946                        "large count " SIZE_FORMAT,
2947             specialized_count, specialized_waste, small_count,
2948             small_waste, medium_count, medium_waste, humongous_count);
2949  if (Metaspace::using_class_space()) {
2950    print_class_waste(out);
2951  }
2952}
2953
2954// Dump global metaspace things from the end of ClassLoaderDataGraph
2955void MetaspaceAux::dump(outputStream* out) {
2956  out->print_cr("All Metaspace:");
2957  out->print("data space: "); print_on(out, Metaspace::NonClassType);
2958  out->print("class space: "); print_on(out, Metaspace::ClassType);
2959  print_waste(out);
2960}
2961
2962void MetaspaceAux::verify_free_chunks() {
2963  Metaspace::chunk_manager_metadata()->verify();
2964  if (Metaspace::using_class_space()) {
2965    Metaspace::chunk_manager_class()->verify();
2966  }
2967}
2968
2969void MetaspaceAux::verify_capacity() {
2970#ifdef ASSERT
2971  size_t running_sum_capacity_bytes = capacity_bytes();
2972  // Verify the running sum of capacity against the value computed by iterating over the metaspaces
2973  size_t capacity_in_use_bytes = capacity_bytes_slow();
2974  assert(running_sum_capacity_bytes == capacity_in_use_bytes,
2975         "capacity_bytes() " SIZE_FORMAT
2976         " capacity_bytes_slow() " SIZE_FORMAT,
2977         running_sum_capacity_bytes, capacity_in_use_bytes);
2978  for (Metaspace::MetadataType i = Metaspace::ClassType;
2979       i < Metaspace::MetadataTypeCount;
2980       i = (Metaspace::MetadataType)(i + 1)) {
2981    size_t capacity_in_use_bytes = capacity_bytes_slow(i);
2982    assert(capacity_bytes(i) == capacity_in_use_bytes,
2983           "capacity_bytes(%u) " SIZE_FORMAT
2984           " capacity_bytes_slow(%u) " SIZE_FORMAT,
2985           i, capacity_bytes(i), i, capacity_in_use_bytes);
2986  }
2987#endif
2988}
2989
2990void MetaspaceAux::verify_used() {
2991#ifdef ASSERT
2992  size_t running_sum_used_bytes = used_bytes();
2993  // Verify the running sum of used bytes against the value computed by iterating over the metaspaces
2994  size_t used_in_use_bytes = used_bytes_slow();
2995  assert(running_sum_used_bytes == used_in_use_bytes,
2996         "used_bytes() " SIZE_FORMAT
2997         " used_bytes_slow() " SIZE_FORMAT,
2998         running_sum_used_bytes, used_in_use_bytes);
2999  for (Metaspace::MetadataType i = Metaspace::ClassType;
3000       i < Metaspace::MetadataTypeCount;
3001       i = (Metaspace::MetadataType)(i + 1)) {
3002    size_t used_in_use_bytes = used_bytes_slow(i);
3003    assert(used_bytes(i) == used_in_use_bytes,
3004           "used_bytes(%u) " SIZE_FORMAT
3005           " used_bytes_slow(%u) " SIZE_FORMAT,
3006           i, used_bytes(i), i, used_in_use_bytes);
3007  }
3008#endif
3009}
3010
3011void MetaspaceAux::verify_metrics() {
3012  verify_capacity();
3013  verify_used();
3014}
3015
3016
3017// Metaspace methods
3018
3019size_t Metaspace::_first_chunk_word_size = 0;
3020size_t Metaspace::_first_class_chunk_word_size = 0;
3021
3022size_t Metaspace::_commit_alignment = 0;
3023size_t Metaspace::_reserve_alignment = 0;
3024
3025Metaspace::Metaspace(Mutex* lock, MetaspaceType type) {
3026  initialize(lock, type);
3027}
3028
3029Metaspace::~Metaspace() {
3030  delete _vsm;
3031  if (using_class_space()) {
3032    delete _class_vsm;
3033  }
3034}
3035
3036VirtualSpaceList* Metaspace::_space_list = NULL;
3037VirtualSpaceList* Metaspace::_class_space_list = NULL;
3038
3039ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
3040ChunkManager* Metaspace::_chunk_manager_class = NULL;
3041
3042#define VIRTUALSPACEMULTIPLIER 2
3043
3044#ifdef _LP64
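// Note: max_juint + 1 == 2^32 == 4G. This is the largest offset a 32-bit
// narrow klass pointer can express without a shift; shifting by
// LogKlassAlignmentInBytes extends the encodable range to 32G.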
3045static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
3046
3047void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
3048  // Figure out the narrow_klass_base and the narrow_klass_shift.  The
3049  // narrow_klass_base is the lower of the metaspace base and the cds base
3050  // (if cds is enabled).  The narrow_klass_shift depends on the distance
3051  // between the lower base and higher address.
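  // Worked example (illustrative numbers only): a 1G class space reserved at
  // 31G with CDS off gives higher_address == 32G. That is still within the
  // 32G zero-based limit computed below, so lower_base becomes 0; but since
  // the 32G distance then exceeds UnscaledClassSpaceMax (4G), the shift must
  // be LogKlassAlignmentInBytes.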
3052  address lower_base;
3053  address higher_address;
3054#if INCLUDE_CDS
3055  if (UseSharedSpaces) {
3056    higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
3057                          (address)(metaspace_base + compressed_class_space_size()));
3058    lower_base = MIN2(metaspace_base, cds_base);
3059  } else
3060#endif
3061  {
3062    higher_address = metaspace_base + compressed_class_space_size();
3063    lower_base = metaspace_base;
3064
3065    uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;
3066    // If compressed class space fits in lower 32G, we don't need a base.
3067    if (higher_address <= (address)klass_encoding_max) {
3068      lower_base = 0; // Effectively lower base is zero.
3069    }
3070  }
3071
3072  Universe::set_narrow_klass_base(lower_base);
3073
3074  if ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) {
3075    Universe::set_narrow_klass_shift(0);
3076  } else {
3077    assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces");
3078    Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
3079  }
3080  AOTLoader::set_narrow_klass_shift();
3081}
3082
3083#if INCLUDE_CDS
3084// Return TRUE if the specified metaspace_base and cds_base are close enough
3085// to work with compressed klass pointers.
3086bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
3087  assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
3088  assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
3089  address lower_base = MIN2((address)metaspace_base, cds_base);
3090  address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
3091                                (address)(metaspace_base + compressed_class_space_size()));
3092  return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
3093}
3094#endif
3095
3096// Try to allocate the metaspace at the requested addr.
3097void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
3098  assert(using_class_space(), "called improperly");
3099  assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
3100  assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
3101         "Metaspace size is too big");
3102  assert_is_ptr_aligned(requested_addr, _reserve_alignment);
3103  assert_is_ptr_aligned(cds_base, _reserve_alignment);
3104  assert_is_size_aligned(compressed_class_space_size(), _reserve_alignment);
3105
3106  // Don't use large pages for the class space.
3107  bool large_pages = false;
3108
3109#if !(defined(AARCH64) || defined(AIX))
3110  ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
3111                                             _reserve_alignment,
3112                                             large_pages,
3113                                             requested_addr);
3114#else // AARCH64 || AIX
3115  ReservedSpace metaspace_rs;
3116
3117  // Our compressed klass pointers may fit nicely into the lower 32
3118  // bits.
3119  if ((uint64_t)requested_addr + compressed_class_space_size() < 4*G) {
3120    metaspace_rs = ReservedSpace(compressed_class_space_size(),
3121                                 _reserve_alignment,
3122                                 large_pages,
3123                                 requested_addr);
3124  }
3125
3126  if (!metaspace_rs.is_reserved()) {
3127    // Aarch64: Try to align metaspace so that we can decode a compressed
3128    // klass with a single MOVK instruction.  We can do this iff the
3129    // compressed class base is a multiple of 4G.
3130    // Aix: Search for a place where we can find memory. If we need to load
3131    // the base, 4G alignment is helpful, too.
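    // For example (assuming a small requested_addr on AArch64), the loop
    // below probes 4G, 8G, 12G, ... On AIX the initial increment is only 1G;
    // once the probe address passes 32G, zero-based encoding is off the
    // table and both platforms step in 4G increments.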
3132    size_t increment = AARCH64_ONLY(4*)G;
3133    for (char *a = align_ptr_up(requested_addr, increment);
3134         a < (char*)(1024*G);
3135         a += increment) {
3136      if (a == (char *)(32*G)) {
3137        // Go faster from here on. Zero-based is no longer possible.
3138        increment = 4*G;
3139      }
3140
3141#if INCLUDE_CDS
3142      if (UseSharedSpaces
3143          && !can_use_cds_with_metaspace_addr(a, cds_base)) {
3144        // We failed to find an aligned base that will reach.  Fall
3145        // back to using our requested addr.
3146        metaspace_rs = ReservedSpace(compressed_class_space_size(),
3147                                     _reserve_alignment,
3148                                     large_pages,
3149                                     requested_addr);
3150        break;
3151      }
3152#endif
3153
3154      metaspace_rs = ReservedSpace(compressed_class_space_size(),
3155                                   _reserve_alignment,
3156                                   large_pages,
3157                                   a);
3158      if (metaspace_rs.is_reserved())
3159        break;
3160    }
3161  }
3162
3163#endif // AARCH64 || AIX
3164
3165  if (!metaspace_rs.is_reserved()) {
3166#if INCLUDE_CDS
3167    if (UseSharedSpaces) {
3168      size_t increment = align_size_up(1*G, _reserve_alignment);
3169
3170      // Keep trying to allocate the metaspace, increasing the requested_addr
3171      // by 1GB each time, until we reach an address that will no longer allow
3172      // use of CDS with compressed klass pointers.
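      // Note: the (addr + increment > addr) check below guards against
      // wrapping around the end of the address space.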
3173      char *addr = requested_addr;
3174      while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
3175             can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
3176        addr = addr + increment;
3177        metaspace_rs = ReservedSpace(compressed_class_space_size(),
3178                                     _reserve_alignment, large_pages, addr);
3179      }
3180    }
3181#endif
3182    // If there has been no successful allocation so far, try to allocate the space anywhere.  If
3183    // that fails too, we are out of memory.  At this point we cannot try allocating the
3184    // metaspace as if UseCompressedClassPointers is off because too much
3185    // initialization has happened that depends on UseCompressedClassPointers.
3186    // So, UseCompressedClassPointers cannot be turned off at this point.
3187    if (!metaspace_rs.is_reserved()) {
3188      metaspace_rs = ReservedSpace(compressed_class_space_size(),
3189                                   _reserve_alignment, large_pages);
3190      if (!metaspace_rs.is_reserved()) {
3191        vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes",
3192                                              compressed_class_space_size()));
3193      }
3194    }
3195  }
3196
3197  // If we got here then the metaspace got allocated.
3198  MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
3199
3200#if INCLUDE_CDS
3201  // Verify that we can use shared spaces.  Otherwise, turn off CDS.
3202  if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
3203    FileMapInfo::stop_sharing_and_unmap(
3204        "Could not allocate metaspace at a compatible address");
3205  }
3206#endif
3207  set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
3208                                  UseSharedSpaces ? (address)cds_base : 0);
3209
3210  initialize_class_space(metaspace_rs);
3211
3212  if (log_is_enabled(Trace, gc, metaspace)) {
3213    Log(gc, metaspace) log;
3214    ResourceMark rm;
3215    print_compressed_class_space(log.trace_stream(), requested_addr);
3216  }
3217}
3218
3219void Metaspace::print_compressed_class_space(outputStream* st, const char* requested_addr) {
3220  st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
3221               p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
3222  if (_class_space_list != NULL) {
3223    address base = (address)_class_space_list->current_virtual_space()->bottom();
3224    st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT,
3225                 compressed_class_space_size(), p2i(base));
3226    if (requested_addr != 0) {
3227      st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr));
3228    }
3229    st->cr();
3230  }
3231}
3232
3233// For UseCompressedClassPointers the class space is reserved above the top of
3234// the Java heap.  The argument passed in is at the base of the compressed space.
3235void Metaspace::initialize_class_space(ReservedSpace rs) {
3236  // The reserved space size may be bigger because of alignment, esp with UseLargePages
3237  assert(rs.size() >= CompressedClassSpaceSize,
3238         SIZE_FORMAT " != " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize);
3239  assert(using_class_space(), "Must be using class space");
3240  _class_space_list = new VirtualSpaceList(rs);
3241  _chunk_manager_class = new ChunkManager(ClassSpecializedChunk, ClassSmallChunk, ClassMediumChunk);
3242
3243  if (!_class_space_list->initialization_succeeded()) {
3244    vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
3245  }
3246}
3247
3248#endif
3249
3250void Metaspace::ergo_initialize() {
3251  if (DumpSharedSpaces) {
3252    // Using large pages when dumping the shared archive is currently not implemented.
3253    FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
3254  }
3255
3256  size_t page_size = os::vm_page_size();
3257  if (UseLargePages && UseLargePagesInMetaspace) {
3258    page_size = os::large_page_size();
3259  }
3260
3261  _commit_alignment  = page_size;
3262  _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
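  // For example, on a typical Linux/x64 box without large pages both values
  // are 4K, so the two alignments coincide; on Windows, where the allocation
  // granularity is 64K, reservations are 64K-aligned while commits remain
  // page-aligned.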
3263
3264  // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since this will
3265  // override if MaxMetaspaceSize was set on the command line or not.
3266  // This information is needed later to conform to the specification of the
3267  // java.lang.management.MemoryUsage API.
3268  //
3269  // Ideally, we would be able to set the default value of MaxMetaspaceSize in
3270  // globals.hpp to the aligned value, but this is not possible, since the
3271  // alignment depends on other flags being parsed.
3272  MaxMetaspaceSize = align_size_down_bounded(MaxMetaspaceSize, _reserve_alignment);
3273
3274  if (MetaspaceSize > MaxMetaspaceSize) {
3275    MetaspaceSize = MaxMetaspaceSize;
3276  }
3277
3278  MetaspaceSize = align_size_down_bounded(MetaspaceSize, _commit_alignment);
3279
3280  assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");
3281
3282  MinMetaspaceExpansion = align_size_down_bounded(MinMetaspaceExpansion, _commit_alignment);
3283  MaxMetaspaceExpansion = align_size_down_bounded(MaxMetaspaceExpansion, _commit_alignment);
3284
3285  CompressedClassSpaceSize = align_size_down_bounded(CompressedClassSpaceSize, _reserve_alignment);
3286  set_compressed_class_space_size(CompressedClassSpaceSize);
3287}
3288
3289void Metaspace::global_initialize() {
3290  MetaspaceGC::initialize();
3291
3292  // Initialize the alignment for shared spaces.
3293  int max_alignment = os::vm_allocation_granularity();
3294  size_t cds_total = 0;
3295
3296  MetaspaceShared::set_max_alignment(max_alignment);
3297
3298  if (DumpSharedSpaces) {
3299#if INCLUDE_CDS
3300    MetaspaceShared::estimate_regions_size();
3301
3302    SharedReadOnlySize  = align_size_up(SharedReadOnlySize,  max_alignment);
3303    SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
3304    SharedMiscDataSize  = align_size_up(SharedMiscDataSize,  max_alignment);
3305    SharedMiscCodeSize  = align_size_up(SharedMiscCodeSize,  max_alignment);
3306
3307    // Initialize with the sum of the shared space sizes.  The read-only
3308    // and read write metaspace chunks will be allocated out of this and the
3309    // remainder is the misc code and data chunks.
3310    cds_total = FileMapInfo::shared_spaces_size();
3311    cds_total = align_size_up(cds_total, _reserve_alignment);
3312    _space_list = new VirtualSpaceList(cds_total/wordSize);
3313    _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
3314
3315    if (!_space_list->initialization_succeeded()) {
3316      vm_exit_during_initialization("Unable to dump shared archive.", NULL);
3317    }
3318
3319#ifdef _LP64
3320    if (cds_total + compressed_class_space_size() > UnscaledClassSpaceMax) {
3321      vm_exit_during_initialization("Unable to dump shared archive.",
3322          err_msg("Size of archive (" SIZE_FORMAT ") + compressed class space ("
3323                  SIZE_FORMAT ") == total (" SIZE_FORMAT ") is larger than compressed "
3324                  "klass limit: " UINT64_FORMAT, cds_total, compressed_class_space_size(),
3325                  cds_total + compressed_class_space_size(), UnscaledClassSpaceMax));
3326    }
3327
3328    // Set the compressed klass pointer base so that decoding of these pointers works
3329    // properly when creating the shared archive.
3330    assert(UseCompressedOops && UseCompressedClassPointers,
3331      "UseCompressedOops and UseCompressedClassPointers must be set");
3332    Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
3333    log_develop_trace(gc, metaspace)("Setting narrow_klass_base to address: " PTR_FORMAT,
3334                                     p2i(_space_list->current_virtual_space()->bottom()));
3335
3336    Universe::set_narrow_klass_shift(0);
3337#endif // _LP64
3338#endif // INCLUDE_CDS
3339  } else {
3340#if INCLUDE_CDS
3341    if (UseSharedSpaces) {
3342      // If using shared space, open the file that contains the shared space
3343      // and map in the memory before initializing the rest of metaspace (so
3344      // the addresses don't conflict)
3345      address cds_address = NULL;
3346      FileMapInfo* mapinfo = new FileMapInfo();
3347
3348      // Open the shared archive file, read and validate the header. If
3349      // initialization fails, shared spaces [UseSharedSpaces] are
3350      // disabled and the file is closed.
3351      // The metadata spaces are also mapped in at this point.
3352      if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
3353        cds_total = FileMapInfo::shared_spaces_size();
3354        cds_address = (address)mapinfo->header()->region_addr(0);
3355#ifdef _LP64
3356        if (using_class_space()) {
3357          char* cds_end = (char*)(cds_address + cds_total);
3358          cds_end = align_ptr_up(cds_end, _reserve_alignment);
3359          // If UseCompressedClassPointers is set then allocate the metaspace area
3360          // above the heap and above the CDS area (if it exists).
3361          allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
3362          // Map the shared string space after compressed pointers
3363          // because it relies on compressed class pointers setting to work
3364          mapinfo->map_string_regions();
3365        }
3366#endif // _LP64
3367      } else {
3368        assert(!mapinfo->is_open() && !UseSharedSpaces,
3369               "archive file not closed or shared spaces not disabled.");
3370      }
3371    }
3372#endif // INCLUDE_CDS
3373
3374#ifdef _LP64
3375    if (!UseSharedSpaces && using_class_space()) {
3376      char* base = (char*)align_ptr_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
3377      allocate_metaspace_compressed_klass_ptrs(base, 0);
3378    }
3379#endif // _LP64
3380
3381    // Initialize these before initializing the VirtualSpaceList
3382    _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
3383    _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
3384    // Make the first class chunk bigger than a medium chunk so it's not put
3385    // on the medium chunk list.  The next chunk will be small and progress
3386    // from there.  This size was determined empirically by running with -version.
3387    _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
3388                                       (CompressedClassSpaceSize/BytesPerWord)*2);
3389    _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
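    // Example with the default values on 64 bit (MediumChunk == 8K words,
    // CompressedClassSpaceSize == 1G): MIN2 picks 6 * MediumChunk == 48K
    // words (~384K bytes), since 2 * (1G / 8) words is far larger.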
3390    // Arbitrarily set the initial virtual space to a multiple
3391    // of the boot class loader size.
3392    size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
3393    word_size = align_size_up(word_size, Metaspace::reserve_alignment_words());
3394
3395    // Initialize the list of virtual spaces.
3396    _space_list = new VirtualSpaceList(word_size);
3397    _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
3398
3399    if (!_space_list->initialization_succeeded()) {
3400      vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
3401    }
3402  }
3403
3404  _tracer = new MetaspaceTracer();
3405}
3406
3407void Metaspace::post_initialize() {
3408  MetaspaceGC::post_initialize();
3409}
3410
3411void Metaspace::initialize_first_chunk(MetaspaceType type, MetadataType mdtype) {
3412  Metachunk* chunk = get_initialization_chunk(type, mdtype);
3413  if (chunk != NULL) {
3414    // Add to this manager's list of chunks in use and current_chunk().
3415    get_space_manager(mdtype)->add_chunk(chunk, true);
3416  }
3417}
3418
3419Metachunk* Metaspace::get_initialization_chunk(MetaspaceType type, MetadataType mdtype) {
3420  size_t chunk_word_size = get_space_manager(mdtype)->get_initial_chunk_size(type);
3421
3422  // Get a chunk from the chunk freelist
3423  Metachunk* chunk = get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
3424
3425  if (chunk == NULL) {
3426    chunk = get_space_list(mdtype)->get_new_chunk(chunk_word_size,
3427                                                  get_space_manager(mdtype)->medium_chunk_bunch());
3428  }
3429
3430  // When dumping the shared archive, report an error if the allocation fails.
3431  if (DumpSharedSpaces && chunk == NULL) {
3432    report_insufficient_metaspace(MetaspaceAux::committed_bytes() + chunk_word_size * BytesPerWord);
3433  }
3434
3435  return chunk;
3436}
3437
3438void Metaspace::verify_global_initialization() {
3439  assert(space_list() != NULL, "Metadata VirtualSpaceList has not been initialized");
3440  assert(chunk_manager_metadata() != NULL, "Metadata ChunkManager has not been initialized");
3441
3442  if (using_class_space()) {
3443    assert(class_space_list() != NULL, "Class VirtualSpaceList has not been initialized");
3444    assert(chunk_manager_class() != NULL, "Class ChunkManager has not been initialized");
3445  }
3446}
3447
3448void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
3449  verify_global_initialization();
3450
3451  // Allocate SpaceManager for metadata objects.
3452  _vsm = new SpaceManager(NonClassType, lock);
3453
3454  if (using_class_space()) {
3455    // Allocate SpaceManager for classes.
3456    _class_vsm = new SpaceManager(ClassType, lock);
3457  }
3458
3459  MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3460
3461  // Allocate chunk for metadata objects
3462  initialize_first_chunk(type, NonClassType);
3463
3464  // Allocate chunk for class metadata objects
3465  if (using_class_space()) {
3466    initialize_first_chunk(type, ClassType);
3467  }
3468
3469  _alloc_record_head = NULL;
3470  _alloc_record_tail = NULL;
3471}
3472
3473size_t Metaspace::align_word_size_up(size_t word_size) {
3474  size_t byte_size = word_size * wordSize;
3475  return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
3476}
3477
3478MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
3479  // DumpSharedSpaces doesn't use class metadata area (yet)
3480  // Also, don't use class_vsm() unless UseCompressedClassPointers is true.
3481  if (is_class_space_allocation(mdtype)) {
3482    return  class_vsm()->allocate(word_size);
3483  } else {
3484    return  vsm()->allocate(word_size);
3485  }
3486}
3487
3488MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
3489  size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
3490  assert(delta_bytes > 0, "Must be");
3491
3492  size_t before = 0;
3493  size_t after = 0;
3494  MetaWord* res;
3495  bool incremented;
3496
3497  // Each thread increments the HWM at most once. Even if the thread fails to increment
3498  // the HWM, an allocation is still attempted. This is because another thread must then
3499  // have incremented the HWM and therefore the allocation might still succeed.
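  // In other words: the loop below only repeats when this thread lost the
  // race to raise the HWM (incremented == false) and the allocation also
  // failed; it exits as soon as either condition flips.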
3500  do {
3501    incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before);
3502    res = allocate(word_size, mdtype);
3503  } while (!incremented && res == NULL);
3504
3505  if (incremented) {
3506    tracer()->report_gc_threshold(before, after,
3507                                  MetaspaceGCThresholdUpdater::ExpandAndAllocate);
3508    log_trace(gc, metaspace)("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before, after);
3509  }
3510
3511  return res;
3512}
3513
3514// Space allocated in the Metaspace.  This may
3515// be across several metadata virtual spaces.
3516char* Metaspace::bottom() const {
3517  assert(DumpSharedSpaces, "only useful and valid for dumping shared spaces");
3518  return (char*)vsm()->current_chunk()->bottom();
3519}
3520
3521size_t Metaspace::used_words_slow(MetadataType mdtype) const {
3522  if (mdtype == ClassType) {
3523    return using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
3524  } else {
3525    return vsm()->sum_used_in_chunks_in_use();  // includes overhead!
3526  }
3527}
3528
3529size_t Metaspace::free_words_slow(MetadataType mdtype) const {
3530  if (mdtype == ClassType) {
3531    return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
3532  } else {
3533    return vsm()->sum_free_in_chunks_in_use();
3534  }
3535}
3536
3537// Space capacity in the Metaspace.  It includes
3538// space in the list of chunks from which allocations
3539// have been made. Space in the global freelist and space available in
3540// the block dictionary are not included, since that space is already
3541// counted in some chunk.
3542size_t Metaspace::capacity_words_slow(MetadataType mdtype) const {
3543  if (mdtype == ClassType) {
3544    return using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0;
3545  } else {
3546    return vsm()->sum_capacity_in_chunks_in_use();
3547  }
3548}
3549
3550size_t Metaspace::used_bytes_slow(MetadataType mdtype) const {
3551  return used_words_slow(mdtype) * BytesPerWord;
3552}
3553
3554size_t Metaspace::capacity_bytes_slow(MetadataType mdtype) const {
3555  return capacity_words_slow(mdtype) * BytesPerWord;
3556}
3557
3558size_t Metaspace::allocated_blocks_bytes() const {
3559  return vsm()->allocated_blocks_bytes() +
3560      (using_class_space() ? class_vsm()->allocated_blocks_bytes() : 0);
3561}
3562
3563size_t Metaspace::allocated_chunks_bytes() const {
3564  return vsm()->allocated_chunks_bytes() +
3565      (using_class_space() ? class_vsm()->allocated_chunks_bytes() : 0);
3566}
3567
3568void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
3569  assert(!SafepointSynchronize::is_at_safepoint()
3570         || Thread::current()->is_VM_thread(), "should be the VM thread");
3571
3572  if (DumpSharedSpaces && log_is_enabled(Info, cds)) {
3573    record_deallocation(ptr, vsm()->get_allocation_word_size(word_size));
3574  }
3575
3576  MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
3577
3578  if (is_class && using_class_space()) {
3579    class_vsm()->deallocate(ptr, word_size);
3580  } else {
3581    vsm()->deallocate(ptr, word_size);
3582  }
3583}
3584
3585
3586MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
3587                              bool read_only, MetaspaceObj::Type type, TRAPS) {
3588  if (HAS_PENDING_EXCEPTION) {
3589    assert(false, "Should not allocate with exception pending");
3590    return NULL;  // caller does a CHECK_NULL too
3591  }
3592
3593  assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
3594        "ClassLoaderData::the_null_class_loader_data() should have been used.");
3595
3596  // Allocate in metaspaces without taking out a lock, because it deadlocks
3597  // with the SymbolTable_lock.  Dumping is single threaded for now.  We'll have
3598  // to revisit this for application class data sharing.
3599  if (DumpSharedSpaces) {
3600    assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity");
3601    Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace();
3602    MetaWord* result = space->allocate(word_size, NonClassType);
3603    if (result == NULL) {
3604      report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
3605    }
3606    if (log_is_enabled(Info, cds)) {
3607      space->record_allocation(result, type, space->vsm()->get_allocation_word_size(word_size));
3608    }
3609
3610    // Zero initialize.
3611    Copy::fill_to_words((HeapWord*)result, word_size, 0);
3612
3613    return result;
3614  }
3615
3616  MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
3617
3618  // Try to allocate metadata.
3619  MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
3620
3621  if (result == NULL) {
3622    tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype);
3623
3624    // Allocation failed.
3625    if (is_init_completed()) {
3626      // Only start a GC if the bootstrapping has completed.
3627
3628      // Try to clean out some memory and retry.
3629      result = Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
3630          loader_data, word_size, mdtype);
3631    }
3632  }
3633
3634  if (result == NULL) {
3635    SpaceManager* sm;
3636    if (is_class_space_allocation(mdtype)) {
3637      sm = loader_data->metaspace_non_null()->class_vsm();
3638    } else {
3639      sm = loader_data->metaspace_non_null()->vsm();
3640    }
3641
3642    result = sm->get_small_chunk_and_allocate(word_size);
3643
3644    if (result == NULL) {
3645      report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL);
3646    }
3647  }
3648
3649  // Zero initialize.
3650  Copy::fill_to_words((HeapWord*)result, word_size, 0);
3651
3652  return result;
3653}
3654
3655size_t Metaspace::class_chunk_size(size_t word_size) {
3656  assert(using_class_space(), "Has to use class space");
3657  return class_vsm()->calc_chunk_size(word_size);
3658}
3659
3660void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
3661  tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);
3662
3663  // If result is still null, we are out of memory.
3664  Log(gc, metaspace, freelist) log;
3665  if (log.is_info()) {
3666    log.info("Metaspace (%s) allocation failed for size " SIZE_FORMAT,
3667             is_class_space_allocation(mdtype) ? "class" : "data", word_size);
3668    ResourceMark rm;
3669    outputStream* out = log.info_stream();
3670    if (loader_data->metaspace_or_null() != NULL) {
3671      loader_data->dump(out);
3672    }
3673    MetaspaceAux::dump(out);
3674  }
3675
3676  bool out_of_compressed_class_space = false;
3677  if (is_class_space_allocation(mdtype)) {
3678    Metaspace* metaspace = loader_data->metaspace_non_null();
3679    out_of_compressed_class_space =
3680      MetaspaceAux::committed_bytes(Metaspace::ClassType) +
3681      (metaspace->class_chunk_size(word_size) * BytesPerWord) >
3682      CompressedClassSpaceSize;
3683  }
3684
3685  // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
3686  const char* space_string = out_of_compressed_class_space ?
3687    "Compressed class space" : "Metaspace";
3688
3689  report_java_out_of_memory(space_string);
3690
3691  if (JvmtiExport::should_post_resource_exhausted()) {
3692    JvmtiExport::post_resource_exhausted(
3693        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
3694        space_string);
3695  }
3696
3697  if (!is_init_completed()) {
3698    vm_exit_during_initialization("OutOfMemoryError", space_string);
3699  }
3700
3701  if (out_of_compressed_class_space) {
3702    THROW_OOP(Universe::out_of_memory_error_class_metaspace());
3703  } else {
3704    THROW_OOP(Universe::out_of_memory_error_metaspace());
3705  }
3706}
3707
3708const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) {
3709  switch (mdtype) {
3710    case Metaspace::ClassType: return "Class";
3711    case Metaspace::NonClassType: return "Metadata";
3712    default:
3713      assert(false, "Got bad mdtype: %d", (int) mdtype);
3714      return NULL;
3715  }
3716}
3717
3718void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
3719  assert(DumpSharedSpaces, "sanity");
3720
3721  int byte_size = (int)word_size * wordSize;
3722  AllocRecord *rec = new AllocRecord((address)ptr, type, byte_size);
3723
3724  if (_alloc_record_head == NULL) {
3725    _alloc_record_head = _alloc_record_tail = rec;
3726  } else if (_alloc_record_tail->_ptr + _alloc_record_tail->_byte_size == (address)ptr) {
3727    _alloc_record_tail->_next = rec;
3728    _alloc_record_tail = rec;
3729  } else {
3730    // slow linear search, but this doesn't happen that often, and only when dumping
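    // If ptr matches a record previously tagged DeallocatedType, that freed
    // range is reused: the old record is retagged with the new type and
    // truncated to byte_size, and any leftover tail is re-inserted behind it
    // as a new DeallocatedType record, keeping the address-ordered chain
    // gap-free.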
3731    for (AllocRecord *old = _alloc_record_head; old; old = old->_next) {
3732      if (old->_ptr == ptr) {
3733        assert(old->_type == MetaspaceObj::DeallocatedType, "sanity");
3734        int remain_bytes = old->_byte_size - byte_size;
3735        assert(remain_bytes >= 0, "sanity");
3736        old->_type = type;
3737
3738        if (remain_bytes == 0) {
3739          delete(rec);
3740        } else {
3741          address remain_ptr = address(ptr) + byte_size;
3742          rec->_ptr = remain_ptr;
3743          rec->_byte_size = remain_bytes;
3744          rec->_type = MetaspaceObj::DeallocatedType;
3745          rec->_next = old->_next;
3746          old->_byte_size = byte_size;
3747          old->_next = rec;
3748        }
3749        return;
3750      }
3751    }
3752    assert(0, "reallocating a freed pointer that was not recorded");
3753  }
3754}
3755
3756void Metaspace::record_deallocation(void* ptr, size_t word_size) {
3757  assert(DumpSharedSpaces, "sanity");
3758
3759  for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
3760    if (rec->_ptr == ptr) {
3761      assert(rec->_byte_size == (int)word_size * wordSize, "sanity");
3762      rec->_type = MetaspaceObj::DeallocatedType;
3763      return;
3764    }
3765  }
3766
3767  assert(0, "deallocating a pointer that was not recorded");
3768}
3769
3770void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) {
3771  assert(DumpSharedSpaces, "unimplemented for !DumpSharedSpaces");
3772
3773  address last_addr = (address)bottom();
3774
3775  for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
3776    address ptr = rec->_ptr;
3777    if (last_addr < ptr) {
3778      closure->doit(last_addr, MetaspaceObj::UnknownType, ptr - last_addr);
3779    }
3780    closure->doit(ptr, rec->_type, rec->_byte_size);
3781    last_addr = ptr + rec->_byte_size;
3782  }
3783
3784  address top = ((address)bottom()) + used_bytes_slow(Metaspace::NonClassType);
3785  if (last_addr < top) {
3786    closure->doit(last_addr, MetaspaceObj::UnknownType, top - last_addr);
3787  }
3788}
3789
3790void Metaspace::purge(MetadataType mdtype) {
3791  get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
3792}
3793
3794void Metaspace::purge() {
3795  MutexLockerEx cl(SpaceManager::expand_lock(),
3796                   Mutex::_no_safepoint_check_flag);
3797  purge(NonClassType);
3798  if (using_class_space()) {
3799    purge(ClassType);
3800  }
3801}
3802
3803void Metaspace::print_on(outputStream* out) const {
3804  // Print both class virtual space counts and metaspace.
3805  if (Verbose) {
3806    vsm()->print_on(out);
3807    if (using_class_space()) {
3808      class_vsm()->print_on(out);
3809    }
3810  }
3811}
3812
3813bool Metaspace::contains(const void* ptr) {
3814  if (UseSharedSpaces && MetaspaceShared::is_in_shared_space(ptr)) {
3815    return true;
3816  }
3817  return contains_non_shared(ptr);
3818}
3819
3820bool Metaspace::contains_non_shared(const void* ptr) {
3821  if (using_class_space() && get_space_list(ClassType)->contains(ptr)) {
3822     return true;
3823  }
3824
3825  return get_space_list(NonClassType)->contains(ptr);
3826}
3827
3828void Metaspace::verify() {
3829  vsm()->verify();
3830  if (using_class_space()) {
3831    class_vsm()->verify();
3832  }
3833}
3834
3835void Metaspace::dump(outputStream* const out) const {
3836  out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, p2i(vsm()));
3837  vsm()->dump(out);
3838  if (using_class_space()) {
3839    out->print_cr("\nClass space manager: " INTPTR_FORMAT, p2i(class_vsm()));
3840    class_vsm()->dump(out);
3841  }
3842}
3843
3844/////////////// Unit tests ///////////////
3845
3846#ifndef PRODUCT
3847
3848class TestMetaspaceAuxTest : AllStatic {
3849 public:
3850  static void test_reserved() {
3851    size_t reserved = MetaspaceAux::reserved_bytes();
3852
3853    assert(reserved > 0, "assert");
3854
3855    size_t committed  = MetaspaceAux::committed_bytes();
3856    assert(committed <= reserved, "assert");
3857
3858    size_t reserved_metadata = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
3859    assert(reserved_metadata > 0, "assert");
3860    assert(reserved_metadata <= reserved, "assert");
3861
3862    if (UseCompressedClassPointers) {
3863      size_t reserved_class    = MetaspaceAux::reserved_bytes(Metaspace::ClassType);
3864      assert(reserved_class > 0, "assert");
3865      assert(reserved_class < reserved, "assert");
3866    }
3867  }
3868
3869  static void test_committed() {
3870    size_t committed = MetaspaceAux::committed_bytes();
3871
3872    assert(committed > 0, "assert");
3873
3874    size_t reserved  = MetaspaceAux::reserved_bytes();
3875    assert(committed <= reserved, "assert");
3876
3877    size_t committed_metadata = MetaspaceAux::committed_bytes(Metaspace::NonClassType);
3878    assert(committed_metadata > 0, "assert");
3879    assert(committed_metadata <= committed, "assert");
3880
3881    if (UseCompressedClassPointers) {
3882      size_t committed_class    = MetaspaceAux::committed_bytes(Metaspace::ClassType);
3883      assert(committed_class > 0, "assert");
3884      assert(committed_class < committed, "assert");
3885    }
3886  }
3887
3888  static void test_virtual_space_list_large_chunk() {
3889    VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
3890    MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3891    // Use a size larger than VirtualSpaceSize (256k), plus enough extra that the
3892    // size is _not_ vm_allocation_granularity aligned on Windows.
3893    size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
3894    large_size += (os::vm_page_size()/BytesPerWord);
3895    vs_list->get_new_chunk(large_size, 0);
3896  }
3897
3898  static void test() {
3899    test_reserved();
3900    test_committed();
3901    test_virtual_space_list_large_chunk();
3902  }
3903};
3904
3905void TestMetaspaceAux_test() {
3906  TestMetaspaceAuxTest::test();
3907}
3908
3909class TestVirtualSpaceNodeTest {
3910  static void chunk_up(size_t words_left, size_t& num_medium_chunks,
3911                                          size_t& num_small_chunks,
3912                                          size_t& num_specialized_chunks) {
3913    num_medium_chunks = words_left / MediumChunk;
3914    words_left = words_left % MediumChunk;
3915
3916    num_small_chunks = words_left / SmallChunk;
3917    words_left = words_left % SmallChunk;
3918    // how many specialized chunks can we get?
3919    num_specialized_chunks = words_left / SpecializedChunk;
3920    assert(words_left % SpecializedChunk == 0, "should be nothing left");
3921  }
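  // Worked example, using the non-class chunk sizes (MediumChunk == 8K,
  // SmallChunk == 512, SpecializedChunk == 128 words): for words_left ==
  // 8064 this yields 0 medium chunks, 8064 / 512 == 15 small chunks, and
  // the remaining 384 words give 384 / 128 == 3 specialized chunks.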
3922
3923 public:
3924  static void test() {
3925    MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3926    const size_t vsn_test_size_words = MediumChunk  * 4;
3927    const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord;
3928
3929    // The chunk sizes must be multiples of each other, or this will fail
3930    STATIC_ASSERT(MediumChunk % SmallChunk == 0);
3931    STATIC_ASSERT(SmallChunk % SpecializedChunk == 0);
3932
3933    { // No committed memory in VSN
3934      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3935      VirtualSpaceNode vsn(vsn_test_size_bytes);
3936      vsn.initialize();
3937      vsn.retire(&cm);
3938      assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN");
3939    }
3940
3941    { // All of VSN is committed, half is used by chunks
3942      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3943      VirtualSpaceNode vsn(vsn_test_size_bytes);
3944      vsn.initialize();
3945      vsn.expand_by(vsn_test_size_words, vsn_test_size_words);
3946      vsn.get_chunk_vs(MediumChunk);
3947      vsn.get_chunk_vs(MediumChunk);
3948      vsn.retire(&cm);
3949      assert(cm.sum_free_chunks_count() == 2, "should have been memory left for 2 medium chunks");
3950      assert(cm.sum_free_chunks() == 2*MediumChunk, "sizes should add up");
3951    }
3952
3953    const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord;
3954    // This doesn't work for systems with vm_page_size >= 16K.
3955    if (page_chunks < MediumChunk) {
3956      // 4 pages of VSN is committed, some is used by chunks
3957      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3958      VirtualSpaceNode vsn(vsn_test_size_bytes);
3959
3960      vsn.initialize();
3961      vsn.expand_by(page_chunks, page_chunks);
3962      vsn.get_chunk_vs(SmallChunk);
3963      vsn.get_chunk_vs(SpecializedChunk);
3964      vsn.retire(&cm);
3965
3966      // committed - used = words left to retire
3967      const size_t words_left = page_chunks - SmallChunk - SpecializedChunk;
3968
3969      size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
3970      chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
3971
3972      assert(num_medium_chunks == 0, "should not get any medium chunks");
3973      assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "free chunk count should match");
3974      assert(cm.sum_free_chunks() == words_left, "sizes should add up");
3975    }
3976
3977    { // Half of VSN is committed, a humongous chunk is used
3978      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3979      VirtualSpaceNode vsn(vsn_test_size_bytes);
3980      vsn.initialize();
3981      vsn.expand_by(MediumChunk * 2, MediumChunk * 2);
3982      vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk
3983      vsn.retire(&cm);
3984
3985      const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk);
3986      size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
3987      chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
3988
3989      assert(num_medium_chunks == 0, "should not get any medium chunks");
3990      assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "free chunk count should match");
3991      assert(cm.sum_free_chunks() == words_left, "sizes should add up");
3992    }
3993
3994  }
3995
3996#define assert_is_available_positive(word_size) \
3997  assert(vsn.is_available(word_size), \
3998         #word_size ": " PTR_FORMAT " bytes were not available in " \
3999         "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
4000         (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));
4001
4002#define assert_is_available_negative(word_size) \
4003  assert(!vsn.is_available(word_size), \
4004         #word_size ": " PTR_FORMAT " bytes should not be available in " \
4005         "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
4006         (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));
4007
4008  static void test_is_available_positive() {
4009    // Reserve some memory.
4010    VirtualSpaceNode vsn(os::vm_allocation_granularity());
4011    assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
4012
4013    // Commit some memory.
4014    size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
4015    bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
4016    assert(expanded, "Failed to commit");
4017
4018    // Check that is_available accepts the committed size.
4019    assert_is_available_positive(commit_word_size);
4020
4021    // Check that is_available accepts half the committed size.
4022    size_t expand_word_size = commit_word_size / 2;
4023    assert_is_available_positive(expand_word_size);
4024  }
4025
4026  static void test_is_available_negative() {
4027    // Reserve some memory.
4028    VirtualSpaceNode vsn(os::vm_allocation_granularity());
4029    assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
4030
4031    // Commit some memory.
4032    size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
4033    bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
4034    assert(expanded, "Failed to commit");
4035
4036    // Check that is_available doesn't accept a too large size.
4037    size_t two_times_commit_word_size = commit_word_size * 2;
4038    assert_is_available_negative(two_times_commit_word_size);
4039  }
4040
4041  static void test_is_available_overflow() {
4042    // Reserve some memory.
4043    VirtualSpaceNode vsn(os::vm_allocation_granularity());
4044    assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
4045
4046    // Commit some memory.
4047    size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
4048    bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
4049    assert(expanded, "Failed to commit");
4050
4051    // Calculate a size that will overflow the virtual space size.
4052    void* virtual_space_max = (void*)(uintptr_t)-1;
4053    size_t bottom_to_max = pointer_delta(virtual_space_max, vsn.bottom(), 1);
4054    size_t overflow_size = bottom_to_max + BytesPerWord;
4055    size_t overflow_word_size = overflow_size / BytesPerWord;
4056
4057    // Check that is_available can handle the overflow.
4058    assert_is_available_negative(overflow_word_size);
4059  }
4060
4061  static void test_is_available() {
4062    TestVirtualSpaceNodeTest::test_is_available_positive();
4063    TestVirtualSpaceNodeTest::test_is_available_negative();
4064    TestVirtualSpaceNodeTest::test_is_available_overflow();
4065  }
4066};
4067
4068void TestVirtualSpaceNode_test() {
4069  TestVirtualSpaceNodeTest::test();
4070  TestVirtualSpaceNodeTest::test_is_available();
4071}
4072
4073// The following test is placed here instead of a gtest / unittest file
4074// because the ChunkManager class is only available in this file.
4075void ChunkManager_test_list_index() {
4076  ChunkManager manager(ClassSpecializedChunk, ClassSmallChunk, ClassMediumChunk);
4077
4078  // Test previous bug where a query for a humongous class metachunk,
4079  // incorrectly matched the non-class medium metachunk size.
4080  {
4081    assert(MediumChunk > ClassMediumChunk, "Precondition for test");
4082
4083    ChunkIndex index = manager.list_index(MediumChunk);
4084
4085    assert(index == HumongousIndex,
4086           "Requested size is larger than ClassMediumChunk,"
4087           " so should return HumongousIndex. Got index: %d", (int)index);
4088  }
4089
4090  // Check the specified sizes as well.
4091  {
4092    ChunkIndex index = manager.list_index(ClassSpecializedChunk);
4093    assert(index == SpecializedIndex, "Wrong index returned. Got index: %d", (int)index);
4094  }
4095  {
4096    ChunkIndex index = manager.list_index(ClassSmallChunk);
4097    assert(index == SmallIndex, "Wrong index returned. Got index: %d", (int)index);
4098  }
4099  {
4100    ChunkIndex index = manager.list_index(ClassMediumChunk);
4101    assert(index == MediumIndex, "Wrong index returned. Got index: %d", (int)index);
4102  }
4103  {
4104    ChunkIndex index = manager.list_index(ClassMediumChunk + 1);
4105    assert(index == HumongousIndex, "Wrong index returned. Got index: %d", (int)index);
4106  }
4107}
4108
4109#endif // !PRODUCT
4110
4111#ifdef ASSERT
4112
4113// ChunkManagerReturnTest stresses taking/returning chunks from the ChunkManager. It takes and
4114// returns chunks from/to the ChunkManager while keeping track of the expected ChunkManager
4115// content.
4116class ChunkManagerReturnTestImpl {
4117
4118  VirtualSpaceNode _vsn;
4119  ChunkManager _cm;
4120
4121  // The expected content of the chunk manager.
4122  unsigned _chunks_in_chunkmanager;
4123  size_t _words_in_chunkmanager;
4124
4125  // A fixed size pool of chunks. Chunks may be in the chunk manager (free) or not (in use).
4126  static const int num_chunks = 256;
4127  Metachunk* _pool[num_chunks];
4128
4129  // Helper, return a random position into the chunk pool.
4130  static int get_random_position() {
4131    return os::random() % num_chunks;
4132  }
4133
4134  // Asserts that ChunkManager counters match expectations.
4135  void assert_counters() {
4136    assert(_vsn.container_count() == num_chunks - _chunks_in_chunkmanager, "vsn counter mismatch.");
4137    assert(_cm.free_chunks_count() == _chunks_in_chunkmanager, "cm counter mismatch.");
4138    assert(_cm.free_chunks_total_words() == _words_in_chunkmanager, "cm counter mismatch.");
4139  }
4140
4141  // Get a random chunk size. Equal chance to get spec/med/small chunk size or
4142  // a humongous chunk size. The latter is random in the range [med+spec..5*med] after alignment.
4143  size_t get_random_chunk_size() {
4144    const size_t sizes [] = { SpecializedChunk, SmallChunk, MediumChunk };
4145    const int rand = os::random() % 4;
4146    if (rand < 3) {
4147      return sizes[rand];
4148    } else {
4149      // Note: this affects the max. size of space (see _vsn initialization in ctor).
4150      return align_size_up(MediumChunk + 1 + (os::random() % (MediumChunk * 4)), SpecializedChunk);
4151    }
4152  }
4153
4154  // Starting at pool index <start>+1, find the next chunk tagged as either free or in use, depending
4155  // on <is_free>. Search wraps. Returns its position, or -1 if no matching chunk was found.
4156  int next_matching_chunk(int start, bool is_free) const {
4157    assert(start >= 0 && start < num_chunks, "invalid parameter");
4158    int pos = start;
4159    do {
4160      if (++pos == num_chunks) {
4161        pos = 0;
4162      }
4163      if (_pool[pos]->is_tagged_free() == is_free) {
4164        return pos;
4165      }
4166    } while (pos != start);
4167    return -1;
4168  }
4169
4170  // A structure to keep information about a chunk list including which
4171  // chunks are part of this list. This is needed to keep information about a chunk list
4172  // we will return to the ChunkManager, because the original list will be destroyed.
4173  struct AChunkList {
4174    Metachunk* head;
4175    Metachunk* all[num_chunks];
4176    size_t size;
4177    int num;
4178    ChunkIndex index;
4179  };
4180
4181  // Assemble, from the in-use chunks (not in the chunk manager) in the pool,
4182  // a random chunk list of max. length <list_size> of chunks with the same
4183  // ChunkIndex (chunk size).
4184  // Returns false if list cannot be assembled. List is returned in the <out>
4185  // structure. Returned list may be smaller than <list_size>.
4186  bool assemble_random_chunklist(AChunkList* out, int list_size) {
4187    // Choose a random in-use chunk from the pool...
4188    const int headpos = next_matching_chunk(get_random_position(), false);
4189    if (headpos == -1) {
4190      return false;
4191    }
4192    Metachunk* const head = _pool[headpos];
4193    out->all[0] = head;
4194    assert(head->is_tagged_free() == false, "Chunk state mismatch");
4195    // ..then go from there, chain it up with up to list_size - 1 number of other
4196    // in-use chunks of the same index.
4197    const ChunkIndex index = _cm.list_index(head->word_size());
4198    int num_added = 1;
4199    size_t size_added = head->word_size();
4200    int pos = headpos;
4201    Metachunk* tail = head;
4202    do {
4203      pos = next_matching_chunk(pos, false);
4204      if (pos != headpos) {
4205        Metachunk* c = _pool[pos];
4206        assert(c->is_tagged_free() == false, "Chunk state mismatch");
4207        if (index == _cm.list_index(c->word_size())) {
4208          tail->set_next(c);
4209          c->set_prev(tail);
4210          tail = c;
4211          out->all[num_added] = c;
4212          num_added ++;
4213          size_added += c->word_size();
4214        }
4215      }
4216    } while (num_added < list_size && pos != headpos);
4217    out->head = head;
4218    out->index = index;
4219    out->size = size_added;
4220    out->num = num_added;
4221    return true;
4222  }

  // Take a single random chunk from the ChunkManager.
  bool take_single_random_chunk_from_chunkmanager() {
    assert_counters();
    _cm.locked_verify();
    int pos = next_matching_chunk(get_random_position(), true);
    if (pos == -1) {
      return false;
    }
    Metachunk* c = _pool[pos];
    assert(c->is_tagged_free(), "Chunk state mismatch");
    // Note: instead of using ChunkManager::remove_chunk on this one chunk, we call
    // ChunkManager::free_chunks_get() with this chunk's word size. We really want
    // to exercise ChunkManager::free_chunks_get(), because that is what gets called
    // for normal chunk allocation.
    Metachunk* c2 = _cm.free_chunks_get(c->word_size());
    assert(c2 != NULL, "Unexpected.");
    assert(!c2->is_tagged_free(), "Chunk state mismatch");
    assert(c2->next() == NULL && c2->prev() == NULL, "Chunk should be outside of a list.");
    _chunks_in_chunkmanager --;
    _words_in_chunkmanager -= c->word_size();
    assert_counters();
    _cm.locked_verify();
    return true;
  }

  // Return a single random chunk to the chunk manager. Returns false if that
  // was not possible (all chunks are already in the chunk manager).
  bool return_single_random_chunk_to_chunkmanager() {
    assert_counters();
    _cm.locked_verify();
    int pos = next_matching_chunk(get_random_position(), false);
    if (pos == -1) {
      return false;
    }
    Metachunk* c = _pool[pos];
    assert(c->is_tagged_free() == false, "wrong chunk information");
    _cm.return_single_chunk(_cm.list_index(c->word_size()), c);
    _chunks_in_chunkmanager ++;
    _words_in_chunkmanager += c->word_size();
    assert(c->is_tagged_free() == true, "wrong chunk information");
    assert_counters();
    _cm.locked_verify();
    return true;
  }

  // Return a random chunk list to the chunk manager. Returns the length of the
  // returned list.
  int return_random_chunk_list_to_chunkmanager(int list_size) {
    assert_counters();
    _cm.locked_verify();
    AChunkList aChunkList;
    if (!assemble_random_chunklist(&aChunkList, list_size)) {
      return 0;
    }
    // Before the chunks are returned, they should still be tagged in use.
    for (int i = 0; i < aChunkList.num; i ++) {
      assert(!aChunkList.all[i]->is_tagged_free(), "chunk state mismatch.");
    }
    _cm.return_chunk_list(aChunkList.index, aChunkList.head);
    _chunks_in_chunkmanager += aChunkList.num;
    _words_in_chunkmanager += aChunkList.size;
    // After all chunks are returned, check that they are now tagged free.
    for (int i = 0; i < aChunkList.num; i ++) {
      assert(aChunkList.all[i]->is_tagged_free(), "chunk state mismatch.");
    }
    assert_counters();
    _cm.locked_verify();
    return aChunkList.num;
  }
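
  // Bookkeeping note: each helper above mirrors its effect in _chunks_in_chunkmanager
  // and _words_in_chunkmanager, so assert_counters() can cross-check this test's view
  // against the ChunkManager's own counters. For example, returning a list of three
  // chunks of SpecializedChunk words each bumps the chunk counter by 3 and the word
  // counter by 3 * SpecializedChunk.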

public:

  ChunkManagerReturnTestImpl()
    : _vsn(align_size_up(MediumChunk * num_chunks * 5 * sizeof(MetaWord), Metaspace::reserve_alignment()))
    , _cm(SpecializedChunk, SmallChunk, MediumChunk)
    , _chunks_in_chunkmanager(0)
    , _words_in_chunkmanager(0)
  {
    MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
    // Allocate virtual space and allocate random chunks. Keep these chunks in _pool.
    // These chunks count as "in use", because they have not yet been added to any
    // chunk manager.
    _vsn.initialize();
    _vsn.expand_by(_vsn.reserved_words(), _vsn.reserved_words());
    for (int i = 0; i < num_chunks; i ++) {
      const size_t size = get_random_chunk_size();
      _pool[i] = _vsn.get_chunk_vs(size);
      assert(_pool[i] != NULL, "allocation failed");
    }
    assert_counters();
    _cm.locked_verify();
  }
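
  // A note on the reservation size used in the initializer list above (assuming the
  // factor 5 is simply generous headroom rather than a tight bound): num_chunks
  // chunks of at most MediumChunk words each need no more than
  // MediumChunk * num_chunks * sizeof(MetaWord) bytes, so reserving five times that
  // amount means the get_chunk_vs() calls in the constructor should never fail.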

  // Test entry point.
  // Return some chunks to the chunk manager (return phase). Take some chunks out (take phase). Repeat.
  // Chunks are chosen randomly. The number of chunks to return or take is also chosen randomly, but is
  // affected by the <phase_length_factor> argument: a factor of 0.0 will cause the test to quickly
  // alternate between returning and taking, whereas a factor of 1.0 will take/return all chunks from/to
  // the chunk manager, thereby emptying or filling it completely.
  void do_test(float phase_length_factor) {
    MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
    assert_counters();
    // Execute n operations, an operation being the move of a single chunk to/from the chunk manager.
    const int num_max_ops = num_chunks * 100;
    int num_ops = num_max_ops;
    const int average_phase_length = (int)(phase_length_factor * num_chunks);
    int num_ops_until_switch = MAX2(1, (average_phase_length + os::random() % 8 - 4));
    bool return_phase = true;
    while (num_ops > 0) {
      int chunks_moved = 0;
      if (return_phase) {
        // Randomly switch between returning a single chunk or a chunk list of random length.
        if (os::random() % 2 == 0) {
          if (return_single_random_chunk_to_chunkmanager()) {
            chunks_moved = 1;
          }
        } else {
          const int list_length = MAX2(1, (os::random() % num_ops_until_switch));
          chunks_moved = return_random_chunk_list_to_chunkmanager(list_length);
        }
      } else {
        // Breathe out: take a single chunk from the chunk manager.
        if (take_single_random_chunk_from_chunkmanager()) {
          chunks_moved = 1;
        }
      }
      num_ops -= chunks_moved;
      num_ops_until_switch -= chunks_moved;
      if (chunks_moved == 0 || num_ops_until_switch <= 0) {
        return_phase = !return_phase;
        num_ops_until_switch = MAX2(1, (average_phase_length + os::random() % 8 - 4));
      }
    }
  }
};

void* setup_chunkmanager_returntests() {
  ChunkManagerReturnTestImpl* p = new ChunkManagerReturnTestImpl();
  return p;
}

void teardown_chunkmanager_returntests(void* p) {
  delete (ChunkManagerReturnTestImpl*) p;
}

void run_chunkmanager_returntests(void* p, float phase_length) {
  ChunkManagerReturnTestImpl* test = (ChunkManagerReturnTestImpl*) p;
  test->do_test(phase_length);
}
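
// A minimal usage sketch for the three entry points above (illustrative only: the
// actual caller lives outside this file, and the phase length factors below are
// arbitrary example values):
//
//   void* t = setup_chunkmanager_returntests();
//   run_chunkmanager_returntests(t, 0.0f);  // alternate quickly between phases
//   run_chunkmanager_returntests(t, 1.0f);  // drain/fill the chunk manager completely
//   teardown_chunkmanager_returntests(t);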

// The following test is placed here instead of a gtest / unittest file
// because the SpaceManager class is only available in this file.
class SpaceManagerTest : AllStatic {
  friend void SpaceManager_test_adjust_initial_chunk_size();

  static void test_adjust_initial_chunk_size(bool is_class) {
    const size_t smallest = SpaceManager::smallest_chunk_size(is_class);
    const size_t normal   = SpaceManager::small_chunk_size(is_class);
    const size_t medium   = SpaceManager::medium_chunk_size(is_class);

#define test_adjust_initial_chunk_size(value, expected, is_class_value)               \
    do {                                                                              \
      size_t v = value;                                                               \
      size_t e = expected;                                                            \
      size_t adjusted = SpaceManager::adjust_initial_chunk_size(v, (is_class_value)); \
      assert(adjusted == e,                                                           \
             "Expected: " SIZE_FORMAT " got: " SIZE_FORMAT, e, adjusted);             \
    } while (0)

    // Smallest (specialized)
    test_adjust_initial_chunk_size(1,            smallest, is_class);
    test_adjust_initial_chunk_size(smallest - 1, smallest, is_class);
    test_adjust_initial_chunk_size(smallest,     smallest, is_class);

    // Small
    test_adjust_initial_chunk_size(smallest + 1, normal, is_class);
    test_adjust_initial_chunk_size(normal - 1,   normal, is_class);
    test_adjust_initial_chunk_size(normal,       normal, is_class);

    // Medium
    test_adjust_initial_chunk_size(normal + 1, medium, is_class);
    test_adjust_initial_chunk_size(medium - 1, medium, is_class);
    test_adjust_initial_chunk_size(medium,     medium, is_class);

    // Humongous
    test_adjust_initial_chunk_size(medium + 1, medium + 1, is_class);

#undef test_adjust_initial_chunk_size
  }
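
  // In effect, the assertions above pin down adjust_initial_chunk_size() as a
  // round-up to the next fixed chunk size, with anything beyond medium passed
  // through unchanged (humongous):
  //
  //   requested <= smallest          -> smallest
  //   smallest < requested <= normal -> normal
  //   normal < requested <= medium   -> medium
  //   medium < requested             -> requested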

  static void test_adjust_initial_chunk_size() {
    test_adjust_initial_chunk_size(false);
    test_adjust_initial_chunk_size(true);
  }
};

void SpaceManager_test_adjust_initial_chunk_size() {
  SpaceManagerTest::test_adjust_initial_chunk_size();
}

#endif // ASSERT