metaspace.cpp revision 6402:2377269bd73d
/*
 * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/allocation.hpp"
#include "memory/binaryTreeDictionary.hpp"
#include "memory/freeList.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/filemap.hpp"
#include "memory/gcLocker.hpp"
#include "memory/metachunk.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceGCThresholdUpdater.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/metaspaceTracer.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/mutex.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "services/memTracker.hpp"
#include "services/memoryService.hpp"
#include "utilities/copy.hpp"
#include "utilities/debug.hpp"

typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary;
typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;

// Set this constant to enable slow integrity checking of the free chunk lists
const bool metaspace_slow_verify = false;

size_t const allocation_from_dictionary_limit = 4 * K;

MetaWord* last_allocated = 0;

size_t Metaspace::_compressed_class_space_size;
const MetaspaceTracer* Metaspace::_tracer = NULL;

// Used in declarations in SpaceManager and ChunkManager
enum ChunkIndex {
  ZeroIndex = 0,
  SpecializedIndex = ZeroIndex,
  SmallIndex = SpecializedIndex + 1,
  MediumIndex = SmallIndex + 1,
  HumongousIndex = MediumIndex + 1,
  NumberOfFreeLists = 3,
  NumberOfInUseLists = 4
};
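
// The first three indices select the fixed-size free lists kept by the
// ChunkManager (see below); HumongousIndex applies only to chunks kept in
// the humongous dictionary, hence NumberOfFreeLists == 3 while
// NumberOfInUseLists == 4.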

enum ChunkSizes {    // in words.
  ClassSpecializedChunk = 128,
  SpecializedChunk = 128,
  ClassSmallChunk = 256,
  SmallChunk = 512,
  ClassMediumChunk = 4 * K,
  MediumChunk = 8 * K
};
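
// In bytes, assuming 8-byte (64-bit) words, these sizes are:
// SpecializedChunk 1K, ClassSmallChunk 2K, SmallChunk 4K,
// ClassMediumChunk 32K and MediumChunk 64K.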

static ChunkIndex next_chunk_index(ChunkIndex i) {
  assert(i < NumberOfInUseLists, "Out of bound");
  return (ChunkIndex) (i+1);
}

volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
uint MetaspaceGC::_shrink_factor = 0;
bool MetaspaceGC::_should_concurrent_collect = false;

typedef class FreeList<Metachunk> ChunkList;

// Manages the global free lists of chunks.
class ChunkManager : public CHeapObj<mtInternal> {
  friend class TestVirtualSpaceNodeTest;

  // Free lists of chunks of the three fixed sizes:
  //   SpecializedChunk
  //   SmallChunk
  //   MediumChunk
  ChunkList _free_chunks[NumberOfFreeLists];

  // Free humongous chunks (larger than MediumChunk) are kept in a
  // dictionary ordered by chunk size.
  ChunkTreeDictionary _humongous_dictionary;

  // Totals for the free chunks in this ChunkManager, across all lists.
  size_t _free_chunks_total;
  size_t _free_chunks_count;

  void dec_free_chunks_total(size_t v) {
    assert(_free_chunks_count > 0 &&
             _free_chunks_total > 0,
             "About to go negative");
    Atomic::add_ptr(-1, &_free_chunks_count);
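    // Negate through a signed jlong so the total is decremented by an
    // atomic add of a negative delta.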
    jlong minus_v = (jlong) - (jlong) v;
    Atomic::add_ptr(minus_v, &_free_chunks_total);
  }

  // Debug support

  size_t sum_free_chunks();
  size_t sum_free_chunks_count();

  void locked_verify_free_chunks_total();
  void slow_locked_verify_free_chunks_total() {
    if (metaspace_slow_verify) {
      locked_verify_free_chunks_total();
    }
  }
  void locked_verify_free_chunks_count();
  void slow_locked_verify_free_chunks_count() {
    if (metaspace_slow_verify) {
      locked_verify_free_chunks_count();
    }
  }
  void verify_free_chunks_count();

 public:

  ChunkManager(size_t specialized_size, size_t small_size, size_t medium_size)
      : _free_chunks_total(0), _free_chunks_count(0) {
    _free_chunks[SpecializedIndex].set_size(specialized_size);
    _free_chunks[SmallIndex].set_size(small_size);
    _free_chunks[MediumIndex].set_size(medium_size);
  }

  // Remove (allocate) a chunk of the given word size from the global freelist.
  Metachunk* chunk_freelist_allocate(size_t word_size);

  // Map a size to a list index assuming that there are lists
  // for special, small, medium, and humongous chunks.
  static ChunkIndex list_index(size_t size);

  // Remove the chunk from its freelist.  It is
  // expected to be on one of the _free_chunks[] lists.
  void remove_chunk(Metachunk* chunk);

  // Add the simple linked list of chunks to the freelist of chunks
  // of type index.
  void return_chunks(ChunkIndex index, Metachunk* chunks);

  // Total of the space in the free chunks list
  size_t free_chunks_total_words();
  size_t free_chunks_total_bytes();

  // Number of chunks in the free chunks list
  size_t free_chunks_count();

  void inc_free_chunks_total(size_t v, size_t count = 1) {
    Atomic::add_ptr(count, &_free_chunks_count);
    Atomic::add_ptr(v, &_free_chunks_total);
  }
  ChunkTreeDictionary* humongous_dictionary() {
    return &_humongous_dictionary;
  }

  ChunkList* free_chunks(ChunkIndex index);

  // Returns the list for the given chunk word size.
  ChunkList* find_free_chunks_list(size_t word_size);

  // Remove from a list by size.  Selects list based on size of chunk.
  Metachunk* free_chunks_get(size_t chunk_word_size);

#define index_bounds_check(index)                                         \
  assert(index == SpecializedIndex ||                                     \
         index == SmallIndex ||                                           \
         index == MediumIndex ||                                          \
         index == HumongousIndex, err_msg("Bad index: %d", (int) index))

  size_t num_free_chunks(ChunkIndex index) const {
    index_bounds_check(index);

    if (index == HumongousIndex) {
      return _humongous_dictionary.total_free_blocks();
    }

    ssize_t count = _free_chunks[index].count();
    return count == -1 ? 0 : (size_t) count;
  }

  size_t size_free_chunks_in_bytes(ChunkIndex index) const {
    index_bounds_check(index);

    size_t word_size = 0;
    if (index == HumongousIndex) {
      word_size = _humongous_dictionary.total_size();
    } else {
      const size_t size_per_chunk_in_words = _free_chunks[index].size();
      word_size = size_per_chunk_in_words * num_free_chunks(index);
    }

    return word_size * BytesPerWord;
  }

  MetaspaceChunkFreeListSummary chunk_free_list_summary() const {
    return MetaspaceChunkFreeListSummary(num_free_chunks(SpecializedIndex),
                                         num_free_chunks(SmallIndex),
                                         num_free_chunks(MediumIndex),
                                         num_free_chunks(HumongousIndex),
                                         size_free_chunks_in_bytes(SpecializedIndex),
                                         size_free_chunks_in_bytes(SmallIndex),
                                         size_free_chunks_in_bytes(MediumIndex),
                                         size_free_chunks_in_bytes(HumongousIndex));
  }

  // Debug support
  void verify();
  void slow_verify() {
    if (metaspace_slow_verify) {
      verify();
    }
  }
  void locked_verify();
  void slow_locked_verify() {
    if (metaspace_slow_verify) {
      locked_verify();
    }
  }
  void verify_free_chunks_total();

  void locked_print_free_chunks(outputStream* st);
  void locked_print_sum_free_chunks(outputStream* st);

  void print_on(outputStream* st) const;
};

// Used to manage the free list of Metablocks (a block corresponds
// to the allocation of a quantum of metadata).
class BlockFreelist VALUE_OBJ_CLASS_SPEC {
  BlockTreeDictionary* _dictionary;

  // Only allocate and split from freelist if the size of the allocation
  // is at least 1/4th the size of the available block.
  const static int WasteMultiplier = 4;
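  // For example, a 100-word request is only satisfied from a free block
  // of at most 400 words; anything larger is left for bigger requests
  // (see get_block() below).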

  // Accessors
  BlockTreeDictionary* dictionary() const { return _dictionary; }

 public:
  BlockFreelist();
  ~BlockFreelist();

  // Get a block from, or return a block to, the free list.
  MetaWord* get_block(size_t word_size);
  void return_block(MetaWord* p, size_t word_size);

  size_t total_size() {
    if (dictionary() == NULL) {
      return 0;
    } else {
      return dictionary()->total_size();
    }
  }

  void print_on(outputStream* st) const;
};

// A VirtualSpaceList node.
class VirtualSpaceNode : public CHeapObj<mtClass> {
  friend class VirtualSpaceList;

  // Link to next VirtualSpaceNode
  VirtualSpaceNode* _next;

  // Total memory region reserved for this VirtualSpace
  MemRegion _reserved;
  ReservedSpace _rs;
  VirtualSpace _virtual_space;
  // Address of the next available space in _virtual_space
  MetaWord* _top;
  // count of chunks contained in this VirtualSpace
  uintx _container_count;

  // Convenience functions to access the _virtual_space
  char* low()  const { return virtual_space()->low(); }
  char* high() const { return virtual_space()->high(); }

  // The first Metachunk will be allocated at the bottom of the
  // VirtualSpace
  Metachunk* first_chunk() { return (Metachunk*) bottom(); }

  // Committed but unused space in the virtual space
  size_t free_words_in_vs() const;
 public:

  VirtualSpaceNode(size_t byte_size);
  VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
  ~VirtualSpaceNode();

  // Convenience functions for logical bottom and end
  MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
  MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }

  size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
  size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }

  bool is_pre_committed() const { return _virtual_space.special(); }

  // Accessors
  VirtualSpaceNode* next() { return _next; }
  void set_next(VirtualSpaceNode* v) { _next = v; }

  void set_reserved(MemRegion const v) { _reserved = v; }
  void set_top(MetaWord* v) { _top = v; }

  // Accessors
  MemRegion* reserved() { return &_reserved; }
  VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }

  // Returns true if "word_size" is available in the VirtualSpace
  bool is_available(size_t word_size) { return word_size <= pointer_delta(end(), _top, sizeof(MetaWord)); }

  MetaWord* top() const { return _top; }
  void inc_top(size_t word_size) { _top += word_size; }

  uintx container_count() { return _container_count; }
  void inc_container_count();
  void dec_container_count();
#ifdef ASSERT
  uint container_count_slow();
  void verify_container_count();
#endif

  // used and capacity in this single entry in the list
  size_t used_words_in_vs() const;
  size_t capacity_words_in_vs() const;

  bool initialize();

  // get space from the virtual space
  Metachunk* take_from_committed(size_t chunk_word_size);

  // Allocate a chunk from the virtual space and return it.
  Metachunk* get_chunk_vs(size_t chunk_word_size);

  // Expands/shrinks the committed space in a virtual space.  Delegates
  // to VirtualSpace.
  bool expand_by(size_t min_words, size_t preferred_words);

  // In preparation for deleting this node, remove all the chunks
  // in the node from any freelist.
  void purge(ChunkManager* chunk_manager);

  // If an allocation doesn't fit in the current node a new node is created.
  // Allocate chunks out of the remaining committed space in this node
  // to avoid wasting that memory.
  // This always adds up because all the chunk sizes are multiples of
  // the smallest chunk size.
  void retire(ChunkManager* chunk_manager);

#ifdef ASSERT
  // Debug support
  void mangle();
#endif

  void print_on(outputStream* st) const;
};

#define assert_is_ptr_aligned(ptr, alignment) \
  assert(is_ptr_aligned(ptr, alignment),      \
    err_msg(PTR_FORMAT " is not aligned to "  \
      SIZE_FORMAT, ptr, alignment))

#define assert_is_size_aligned(size, alignment) \
  assert(is_size_aligned(size, alignment),      \
    err_msg(SIZE_FORMAT " is not aligned to "   \
       SIZE_FORMAT, size, alignment))


// Decide if large pages should be committed when the memory is reserved.
static bool should_commit_large_pages_when_reserving(size_t bytes) {
  if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
    size_t words = bytes / BytesPerWord;
    bool is_class = false; // We never reserve large pages for the class space.
    if (MetaspaceGC::can_expand(words, is_class) &&
        MetaspaceGC::allowed_expansion() >= words) {
      return true;
    }
  }

  return false;
}

// bytes is the size of the associated VirtualSpace.
VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
  assert_is_size_aligned(bytes, Metaspace::reserve_alignment());

  // This allocates memory with mmap.  For DumpSharedSpaces, try to reserve
  // at a configurable address, generally at the top of the Java heap so other
  // memory addresses don't conflict.
  if (DumpSharedSpaces) {
    bool large_pages = false; // No large pages when dumping the CDS archive.
    char* shared_base = (char*)align_ptr_up((char*)SharedBaseAddress, Metaspace::reserve_alignment());

    _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base, 0);
    if (_rs.is_reserved()) {
      assert(shared_base == 0 || _rs.base() == shared_base, "should match");
    } else {
      // Get an mmap region anywhere if reserving at the SharedBaseAddress fails.
      _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
    }
    MetaspaceShared::set_shared_rs(&_rs);
  } else {
    bool large_pages = should_commit_large_pages_when_reserving(bytes);

    _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
  }

  if (_rs.is_reserved()) {
    assert(_rs.base() != NULL, "Catch if we get a NULL address");
    assert(_rs.size() != 0, "Catch if we get a 0 size");
    assert_is_ptr_aligned(_rs.base(), Metaspace::reserve_alignment());
    assert_is_size_aligned(_rs.size(), Metaspace::reserve_alignment());

    MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
  }
}

void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk) {
    assert(chunk->is_tagged_free(), "Should be tagged free");
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    chunk_manager->remove_chunk(chunk);
    assert(chunk->next() == NULL &&
           chunk->prev() == NULL,
           "Was not removed from its list");
    chunk = (Metachunk*) next;
  }
}

#ifdef ASSERT
uint VirtualSpaceNode::container_count_slow() {
  uint count = 0;
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk) {
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    // Don't count the chunks on the free lists.  Those are
    // still part of the VirtualSpaceNode but not currently
    // counted.
    if (!chunk->is_tagged_free()) {
      count++;
    }
    chunk = (Metachunk*) next;
  }
  return count;
}
#endif

// List of VirtualSpaces for metadata allocation.
class VirtualSpaceList : public CHeapObj<mtClass> {
  friend class VirtualSpaceNode;

  enum VirtualSpaceSizes {
    VirtualSpaceSize = 256 * K
  };

  // Head of the list
  VirtualSpaceNode* _virtual_space_list;
  // virtual space currently being used for allocations
  VirtualSpaceNode* _current_virtual_space;

  // Is this VirtualSpaceList used for the compressed class space
  bool _is_class;

  // Sum of reserved and committed memory in the virtual spaces
  size_t _reserved_words;
  size_t _committed_words;

  // Number of virtual spaces
  size_t _virtual_space_count;

  ~VirtualSpaceList();

  VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }

  void set_virtual_space_list(VirtualSpaceNode* v) {
    _virtual_space_list = v;
  }
  void set_current_virtual_space(VirtualSpaceNode* v) {
    _current_virtual_space = v;
  }

  void link_vs(VirtualSpaceNode* new_entry);

  // Get another virtual space and add it to the list.  This
  // is typically prompted by a failed attempt to allocate a chunk
  // and is typically followed by the allocation of a chunk.
  bool create_new_virtual_space(size_t vs_word_size);

  // Chunk up the unused committed space in the current
  // virtual space and add the chunks to the free list.
  void retire_current_virtual_space();

 public:
  VirtualSpaceList(size_t word_size);
  VirtualSpaceList(ReservedSpace rs);

  size_t free_bytes();

  Metachunk* get_new_chunk(size_t word_size,
                           size_t grow_chunks_by_words,
                           size_t medium_chunk_bunch);

  bool expand_node_by(VirtualSpaceNode* node,
                      size_t min_words,
                      size_t preferred_words);

  bool expand_by(size_t min_words,
                 size_t preferred_words);

  VirtualSpaceNode* current_virtual_space() {
    return _current_virtual_space;
  }

  bool is_class() const { return _is_class; }

  bool initialization_succeeded() { return _virtual_space_list != NULL; }

  size_t reserved_words()  { return _reserved_words; }
  size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
  size_t committed_words() { return _committed_words; }
  size_t committed_bytes() { return committed_words() * BytesPerWord; }

  void inc_reserved_words(size_t v);
  void dec_reserved_words(size_t v);
  void inc_committed_words(size_t v);
  void dec_committed_words(size_t v);
  void inc_virtual_space_count();
  void dec_virtual_space_count();

  // Unlink empty VirtualSpaceNodes and free them.
  void purge(ChunkManager* chunk_manager);

  void print_on(outputStream* st) const;

  class VirtualSpaceListIterator : public StackObj {
    VirtualSpaceNode* _virtual_spaces;
   public:
    VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
      _virtual_spaces(virtual_spaces) {}

    bool repeat() {
      return _virtual_spaces != NULL;
    }

    VirtualSpaceNode* get_next() {
      VirtualSpaceNode* result = _virtual_spaces;
      if (_virtual_spaces != NULL) {
        _virtual_spaces = _virtual_spaces->next();
      }
      return result;
    }
  };
};

class Metadebug : AllStatic {
  // Debugging support for Metaspaces
  static int _allocation_fail_alot_count;

 public:

  static void init_allocation_fail_alot_count();
#ifdef ASSERT
  static bool test_metadata_failure();
#endif
};

int Metadebug::_allocation_fail_alot_count = 0;

//  SpaceManager - used by Metaspace to handle allocations
class SpaceManager : public CHeapObj<mtClass> {
  friend class Metaspace;
  friend class Metadebug;

 private:

  // protects allocations
  Mutex* const _lock;

  // Type of metadata allocated.
  Metaspace::MetadataType _mdtype;

  // List of chunks in use by this SpaceManager.  Allocations
  // are done from the current chunk.  The list is used for deallocating
  // chunks when the SpaceManager is freed.
  Metachunk* _chunks_in_use[NumberOfInUseLists];
  Metachunk* _current_chunk;

  // Number of small chunks to allocate to a manager
  // If class space manager, small chunks are unlimited
  static uint const _small_chunk_limit;

  // Sum of the space used by all allocated blocks
  size_t _allocated_blocks_words;

  // Sum of all allocated chunks
  size_t _allocated_chunks_words;
  size_t _allocated_chunks_count;

  // Free lists of blocks are per SpaceManager since they
  // are assumed to be in chunks in use by the SpaceManager
  // and all chunks in use by a SpaceManager are freed when
  // the class loader using the SpaceManager is collected.
  BlockFreelist _block_freelists;

  // protects VirtualSpace and chunk expansions
  static const char*  _expand_lock_name;
  static const int    _expand_lock_rank;
  static Mutex* const _expand_lock;

 private:
  // Accessors
  Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
  void set_chunks_in_use(ChunkIndex index, Metachunk* v) {
    // ensure lock-free iteration sees fully initialized node
    OrderAccess::storestore();
    _chunks_in_use[index] = v;
  }

  BlockFreelist* block_freelists() const {
    return (BlockFreelist*) &_block_freelists;
  }

  Metaspace::MetadataType mdtype() { return _mdtype; }

  VirtualSpaceList* vs_list()   const { return Metaspace::get_space_list(_mdtype); }
  ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }

  Metachunk* current_chunk() const { return _current_chunk; }
  void set_current_chunk(Metachunk* v) {
    _current_chunk = v;
  }

  Metachunk* find_current_chunk(size_t word_size);

  // Add chunk to the list of chunks in use
  void add_chunk(Metachunk* v, bool make_current);
  void retire_current_chunk();

  Mutex* lock() const { return _lock; }

  const char* chunk_size_name(ChunkIndex index) const;

 protected:
  void initialize();

 public:
  SpaceManager(Metaspace::MetadataType mdtype,
               Mutex* lock);
  ~SpaceManager();

  enum ChunkMultiples {
    MediumChunkMultiple = 4
  };

  bool is_class() { return _mdtype == Metaspace::ClassType; }

  // Accessors
  size_t specialized_chunk_size() { return (size_t) (is_class() ? ClassSpecializedChunk : SpecializedChunk); }
  size_t small_chunk_size()       { return (size_t) (is_class() ? ClassSmallChunk : SmallChunk); }
  size_t medium_chunk_size()      { return (size_t) (is_class() ? ClassMediumChunk : MediumChunk); }
  size_t medium_chunk_bunch()     { return medium_chunk_size() * MediumChunkMultiple; }

  size_t smallest_chunk_size()  { return specialized_chunk_size(); }

  size_t allocated_blocks_words() const { return _allocated_blocks_words; }
  size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
  size_t allocated_chunks_words() const { return _allocated_chunks_words; }
  size_t allocated_chunks_count() const { return _allocated_chunks_count; }

  bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }

  static Mutex* expand_lock() { return _expand_lock; }

  // Increment the per Metaspace and global running sums for Metachunks
  // by the given size.  This is used when a Metachunk is added to
  // the in-use list.
  void inc_size_metrics(size_t words);
  // Increment the per Metaspace and global running sums for Metablocks
  // by the given size.  This is used when a Metablock is allocated.
  void inc_used_metrics(size_t words);
  // Delete the portion of the running sums for this SpaceManager. That is,
  // the global running sums for the Metachunks and Metablocks are
  // decremented for all the Metachunks in-use by this SpaceManager.
  void dec_total_from_size_metrics();

  // Set the sizes for the initial chunks.
  void get_initial_chunk_sizes(Metaspace::MetaspaceType type,
                               size_t* chunk_word_size,
                               size_t* class_chunk_word_size);

  size_t sum_capacity_in_chunks_in_use() const;
  size_t sum_used_in_chunks_in_use() const;
  size_t sum_free_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use(ChunkIndex index) const;

  size_t sum_count_in_chunks_in_use();
  size_t sum_count_in_chunks_in_use(ChunkIndex i);

  Metachunk* get_new_chunk(size_t word_size, size_t grow_chunks_by_words);

  // Block allocation and deallocation.
  // Allocates a block from the current chunk
  MetaWord* allocate(size_t word_size);

  // Helper for allocations
  MetaWord* allocate_work(size_t word_size);

  // Returns a block to the per manager freelist
  void deallocate(MetaWord* p, size_t word_size);

  // Based on the allocation size and a minimum chunk size, returns
  // the chunk size to use when expanding space for an allocation.
  size_t calc_chunk_size(size_t allocation_word_size);

  // Called when an allocation from the current chunk fails.
  // Gets a new chunk (may require getting a new virtual space),
  // and allocates from that chunk.
  MetaWord* grow_and_allocate(size_t word_size);

  // Notify MemoryService of the current metaspace memory usage.
  void track_metaspace_memory_usage();

  // debugging support.

  void dump(outputStream* const out) const;
  void print_on(outputStream* st) const;
  void locked_print_chunks_in_use_on(outputStream* st) const;

  bool contains(const void *ptr);

  void verify();
  void verify_chunk_size(Metachunk* chunk);
  NOT_PRODUCT(void mangle_freed_chunks();)
#ifdef ASSERT
  void verify_allocated_blocks_words();
#endif

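  // Adjust a requested word size to the 'raw' size actually carved out of
  // a chunk: at least large enough to hold a Metablock, and aligned to the
  // Metachunk object alignment.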
  size_t get_raw_word_size(size_t word_size) {
    size_t byte_size = word_size * BytesPerWord;

    size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
    raw_bytes_size = align_size_up(raw_bytes_size, Metachunk::object_alignment());

    size_t raw_word_size = raw_bytes_size / BytesPerWord;
    assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");

    return raw_word_size;
  }
};

uint const SpaceManager::_small_chunk_limit = 4;

const char* SpaceManager::_expand_lock_name =
  "SpaceManager chunk allocation lock";
const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
Mutex* const SpaceManager::_expand_lock =
  new Mutex(SpaceManager::_expand_lock_rank,
            SpaceManager::_expand_lock_name,
            Mutex::_allow_vm_block_flag);

void VirtualSpaceNode::inc_container_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _container_count++;
  assert(_container_count == container_count_slow(),
         err_msg("Inconsistency in container_count _container_count " SIZE_FORMAT
                 " container_count_slow() " SIZE_FORMAT,
                 _container_count, container_count_slow()));
}

void VirtualSpaceNode::dec_container_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _container_count--;
}

#ifdef ASSERT
void VirtualSpaceNode::verify_container_count() {
  assert(_container_count == container_count_slow(),
    err_msg("Inconsistency in container_count _container_count " SIZE_FORMAT
            " container_count_slow() " SIZE_FORMAT, _container_count, container_count_slow()));
}
#endif

// BlockFreelist methods

BlockFreelist::BlockFreelist() : _dictionary(NULL) {}

BlockFreelist::~BlockFreelist() {
  if (_dictionary != NULL) {
    if (Verbose && TraceMetadataChunkAllocation) {
      _dictionary->print_free_lists(gclog_or_tty);
    }
    delete _dictionary;
  }
}

void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
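  // Overlay a Metablock header on the returned storage with placement new;
  // the free block dictionary tracks blocks through this embedded header.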
  Metablock* free_chunk = ::new (p) Metablock(word_size);
  if (dictionary() == NULL) {
    _dictionary = new BlockTreeDictionary();
  }
  dictionary()->return_chunk(free_chunk);
}

MetaWord* BlockFreelist::get_block(size_t word_size) {
  if (dictionary() == NULL) {
    return NULL;
  }

  if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
    // Dark matter.  Too small for dictionary.
    return NULL;
  }

  Metablock* free_block =
    dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::atLeast);
  if (free_block == NULL) {
    return NULL;
  }

  const size_t block_size = free_block->size();
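  // Don't carve a small allocation out of a much larger free block: past
  // WasteMultiplier times the request, put the block back unused.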
  if (block_size > WasteMultiplier * word_size) {
    return_block((MetaWord*)free_block, block_size);
    return NULL;
  }

  MetaWord* new_block = (MetaWord*)free_block;
  assert(block_size >= word_size, "Incorrect size of block from freelist");
  const size_t unused = block_size - word_size;
  if (unused >= TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
    return_block(new_block + word_size, unused);
  }

  return new_block;
}

void BlockFreelist::print_on(outputStream* st) const {
  if (dictionary() == NULL) {
    return;
  }
  dictionary()->print_free_lists(st);
}

// VirtualSpaceNode methods

VirtualSpaceNode::~VirtualSpaceNode() {
  _rs.release();
#ifdef ASSERT
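  // Mangle the freed node so stale references fail fast in debug builds.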
  size_t word_size = sizeof(*this) / BytesPerWord;
  Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
#endif
}

size_t VirtualSpaceNode::used_words_in_vs() const {
  return pointer_delta(top(), bottom(), sizeof(MetaWord));
}

// Space committed in the VirtualSpace
size_t VirtualSpaceNode::capacity_words_in_vs() const {
  return pointer_delta(end(), bottom(), sizeof(MetaWord));
}

size_t VirtualSpaceNode::free_words_in_vs() const {
  return pointer_delta(end(), top(), sizeof(MetaWord));
}

// Allocates the chunk from the virtual space only.
// This interface is also used internally for debugging.  Not all
// chunks removed here are necessarily used for allocation.
Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
  // Bottom of the new chunk
  MetaWord* chunk_limit = top();
  assert(chunk_limit != NULL, "Not safe to call this method");

  // The virtual spaces are always expanded by the
  // commit granularity to enforce the following condition.
  // Without this the is_available check will not work correctly.
  assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
      "The committed memory doesn't match the expanded memory.");

  if (!is_available(chunk_word_size)) {
    if (TraceMetadataChunkAllocation) {
      gclog_or_tty->print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
      // Dump some information about the virtual space that is nearly full
      print_on(gclog_or_tty);
    }
    return NULL;
  }

  // Take the space (bump top on the current virtual space).
  inc_top(chunk_word_size);

  // Initialize the chunk
  Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this);
  return result;
}


// Expand the virtual space (commit more of the reserved space)
bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
  size_t min_bytes = min_words * BytesPerWord;
  size_t preferred_bytes = preferred_words * BytesPerWord;

  size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();

  if (uncommitted < min_bytes) {
    return false;
  }

  size_t commit = MIN2(preferred_bytes, uncommitted);
  bool result = virtual_space()->expand_by(commit, false);

  assert(result, "Failed to commit memory");

  return result;
}

Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());
  Metachunk* result = take_from_committed(chunk_word_size);
  if (result != NULL) {
    inc_container_count();
  }
  return result;
}

bool VirtualSpaceNode::initialize() {

  if (!_rs.is_reserved()) {
    return false;
  }

  // These are necessary restrictions to make sure that the virtual space always
  // grows in steps of Metaspace::commit_alignment(). If both base and size are
  // aligned, only the middle alignment of the VirtualSpace is used.
  assert_is_ptr_aligned(_rs.base(), Metaspace::commit_alignment());
  assert_is_size_aligned(_rs.size(), Metaspace::commit_alignment());

  // ReservedSpaces marked as special will have the entire memory
  // pre-committed. Setting a committed size will make sure that
  // committed_size and actual_committed_size agree.
  size_t pre_committed_size = _rs.special() ? _rs.size() : 0;

  bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
                                            Metaspace::commit_alignment());
  if (result) {
    assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
        "Checking that the pre-committed memory was registered by the VirtualSpace");

    set_top((MetaWord*)virtual_space()->low());
    set_reserved(MemRegion((HeapWord*)_rs.base(),
                 (HeapWord*)(_rs.base() + _rs.size())));

    assert(reserved()->start() == (HeapWord*) _rs.base(),
      err_msg("Reserved start was not set properly " PTR_FORMAT
        " != " PTR_FORMAT, reserved()->start(), _rs.base()));
    assert(reserved()->word_size() == _rs.size() / BytesPerWord,
      err_msg("Reserved size was not set properly " SIZE_FORMAT
        " != " SIZE_FORMAT, reserved()->word_size(),
        _rs.size() / BytesPerWord));
  }

  return result;
}

void VirtualSpaceNode::print_on(outputStream* st) const {
  size_t used = used_words_in_vs();
  size_t capacity = capacity_words_in_vs();
  VirtualSpace* vs = virtual_space();
  st->print_cr("   space @ " PTR_FORMAT " " SIZE_FORMAT "K, %3d%% used "
           "[" PTR_FORMAT ", " PTR_FORMAT ", "
           PTR_FORMAT ", " PTR_FORMAT ")",
           vs, capacity / K,
           capacity == 0 ? 0 : used * 100 / capacity,
           bottom(), top(), end(),
           vs->high_boundary());
}

#ifdef ASSERT
void VirtualSpaceNode::mangle() {
  size_t word_size = capacity_words_in_vs();
  Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
}
#endif // ASSERT

// VirtualSpaceList methods
// Space allocated from the VirtualSpace

VirtualSpaceList::~VirtualSpaceList() {
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsl = iter.get_next();
    delete vsl;
  }
}

void VirtualSpaceList::inc_reserved_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _reserved_words = _reserved_words + v;
}
void VirtualSpaceList::dec_reserved_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _reserved_words = _reserved_words - v;
}

#define assert_committed_below_limit()                             \
  assert(MetaspaceAux::committed_bytes() <= MaxMetaspaceSize,      \
      err_msg("Too much committed memory. Committed: " SIZE_FORMAT \
              " limit (MaxMetaspaceSize): " SIZE_FORMAT,           \
          MetaspaceAux::committed_bytes(), MaxMetaspaceSize));

void VirtualSpaceList::inc_committed_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _committed_words = _committed_words + v;

  assert_committed_below_limit();
}
void VirtualSpaceList::dec_committed_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _committed_words = _committed_words - v;

  assert_committed_below_limit();
}

void VirtualSpaceList::inc_virtual_space_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _virtual_space_count++;
}
void VirtualSpaceList::dec_virtual_space_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _virtual_space_count--;
}

void ChunkManager::remove_chunk(Metachunk* chunk) {
  size_t word_size = chunk->word_size();
  ChunkIndex index = list_index(word_size);
  if (index != HumongousIndex) {
    free_chunks(index)->remove_chunk(chunk);
  } else {
    humongous_dictionary()->remove_chunk(chunk);
  }

  // Chunk is being removed from the chunks free list.
  dec_free_chunks_total(chunk->word_size());
}

// Walk the list of VirtualSpaceNodes and delete
// nodes with a 0 container_count.  Remove Metachunks in
// the node from their respective freelists.
void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
  assert_lock_strong(SpaceManager::expand_lock());
  // Don't use a VirtualSpaceListIterator because this
  // list is being changed and a straightforward use of an iterator is not safe.
  VirtualSpaceNode* purged_vsl = NULL;
  VirtualSpaceNode* prev_vsl = virtual_space_list();
  VirtualSpaceNode* next_vsl = prev_vsl;
  while (next_vsl != NULL) {
    VirtualSpaceNode* vsl = next_vsl;
    next_vsl = vsl->next();
    // Don't free the current virtual space since it will likely
    // be needed soon.
    if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
      // Unlink it from the list
      if (prev_vsl == vsl) {
        // This is the case of the current node being the first node.
        assert(vsl == virtual_space_list(), "Expected to be the first node");
        set_virtual_space_list(vsl->next());
      } else {
        prev_vsl->set_next(vsl->next());
      }

      vsl->purge(chunk_manager);
      dec_reserved_words(vsl->reserved_words());
      dec_committed_words(vsl->committed_words());
      dec_virtual_space_count();
      purged_vsl = vsl;
      delete vsl;
    } else {
      prev_vsl = vsl;
    }
  }
#ifdef ASSERT
  if (purged_vsl != NULL) {
    // List should be stable enough to use an iterator here.
    VirtualSpaceListIterator iter(virtual_space_list());
    while (iter.repeat()) {
      VirtualSpaceNode* vsl = iter.get_next();
      assert(vsl != purged_vsl, "Purge of vsl failed");
    }
  }
#endif
}

void VirtualSpaceList::retire_current_virtual_space() {
  assert_lock_strong(SpaceManager::expand_lock());

  VirtualSpaceNode* vsn = current_virtual_space();

  ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
                                  Metaspace::chunk_manager_metadata();

  vsn->retire(cm);
}

void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
  for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
    ChunkIndex index = (ChunkIndex)i;
    size_t chunk_size = chunk_manager->free_chunks(index)->size();

    while (free_words_in_vs() >= chunk_size) {
      DEBUG_ONLY(verify_container_count();)
      Metachunk* chunk = get_chunk_vs(chunk_size);
      assert(chunk != NULL, "allocation should have been successful");

      chunk_manager->return_chunks(index, chunk);
      chunk_manager->inc_free_chunks_total(chunk_size);
      DEBUG_ONLY(verify_container_count();)
    }
  }
  assert(free_words_in_vs() == 0, "should be empty now");
}

VirtualSpaceList::VirtualSpaceList(size_t word_size) :
                                   _is_class(false),
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _reserved_words(0),
                                   _committed_words(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  create_new_virtual_space(word_size);
}

VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
                                   _is_class(true),
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _reserved_words(0),
                                   _committed_words(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
  bool succeeded = class_entry->initialize();
  if (succeeded) {
    link_vs(class_entry);
  }
}

size_t VirtualSpaceList::free_bytes() {
  return virtual_space_list()->free_words_in_vs() * BytesPerWord;
}

// Allocate another meta virtual space and add it to the list.
bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());

  if (is_class()) {
    assert(false, "We currently don't support more than one VirtualSpace for"
                  " the compressed class space. The initialization of the"
                  " CCS uses another code path and should not hit this path.");
    return false;
  }

  if (vs_word_size == 0) {
    assert(false, "vs_word_size should always be at least _reserve_alignment large.");
    return false;
  }

  // Reserve the space
  size_t vs_byte_size = vs_word_size * BytesPerWord;
  assert_is_size_aligned(vs_byte_size, Metaspace::reserve_alignment());

  // Allocate the meta virtual space and initialize it.
  VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
  if (!new_entry->initialize()) {
    delete new_entry;
    return false;
  } else {
    assert(new_entry->reserved_words() == vs_word_size,
        "Reserved memory size differs from requested memory size");
    link_vs(new_entry);
    return true;
  }
}

void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
  if (virtual_space_list() == NULL) {
    set_virtual_space_list(new_entry);
  } else {
    current_virtual_space()->set_next(new_entry);
  }
  set_current_virtual_space(new_entry);
  inc_reserved_words(new_entry->reserved_words());
  inc_committed_words(new_entry->committed_words());
  inc_virtual_space_count();
#ifdef ASSERT
  new_entry->mangle();
#endif
  if (TraceMetavirtualspaceAllocation && Verbose) {
    VirtualSpaceNode* vsl = current_virtual_space();
    vsl->print_on(gclog_or_tty);
  }
}

bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
                                      size_t min_words,
                                      size_t preferred_words) {
  size_t before = node->committed_words();

  bool result = node->expand_by(min_words, preferred_words);

  size_t after = node->committed_words();

  // after and before can be the same if the memory was pre-committed.
  assert(after >= before, "Inconsistency");
  inc_committed_words(after - before);

  return result;
}

bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
  assert_is_size_aligned(min_words,       Metaspace::commit_alignment_words());
  assert_is_size_aligned(preferred_words, Metaspace::commit_alignment_words());
  assert(min_words <= preferred_words, "Invalid arguments");

  if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
    return false;
  }

  size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
  if (allowed_expansion_words < min_words) {
    return false;
  }

  size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);

  // Commit more memory from the current virtual space.
  bool vs_expanded = expand_node_by(current_virtual_space(),
                                    min_words,
                                    max_expansion_words);
  if (vs_expanded) {
    return true;
  }
  retire_current_virtual_space();

  // Get another virtual space.
  size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
  grow_vs_words = align_size_up(grow_vs_words, Metaspace::reserve_alignment_words());

  if (create_new_virtual_space(grow_vs_words)) {
    if (current_virtual_space()->is_pre_committed()) {
      // The memory was pre-committed, so we are done here.
      assert(min_words <= current_virtual_space()->committed_words(),
          "The new VirtualSpace was pre-committed, so it "
          "should be large enough to fit the alloc request.");
      return true;
    }

    return expand_node_by(current_virtual_space(),
                          min_words,
                          max_expansion_words);
  }

  return false;
}

Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
                                           size_t grow_chunks_by_words,
                                           size_t medium_chunk_bunch) {

  // Allocate a chunk out of the current virtual space.
  Metachunk* next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);

  if (next != NULL) {
    return next;
  }

  // The expand amount is currently only determined by the requested sizes
  // and not how much committed memory is left in the current virtual space.

  size_t min_word_size       = align_size_up(grow_chunks_by_words, Metaspace::commit_alignment_words());
  size_t preferred_word_size = align_size_up(medium_chunk_bunch,   Metaspace::commit_alignment_words());
  if (min_word_size >= preferred_word_size) {
    // Can happen when humongous chunks are allocated.
    preferred_word_size = min_word_size;
  }

  bool expanded = expand_by(min_word_size, preferred_word_size);
  if (expanded) {
    next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
    assert(next != NULL, "The allocation was expected to succeed after the expansion");
  }

  return next;
}

void VirtualSpaceList::print_on(outputStream* st) const {
  if (TraceMetadataChunkAllocation && Verbose) {
    VirtualSpaceListIterator iter(virtual_space_list());
    while (iter.repeat()) {
      VirtualSpaceNode* node = iter.get_next();
      node->print_on(st);
    }
  }
}

// MetaspaceGC methods

// VM_CollectForMetadataAllocation is the vm operation used to GC.
// Within the VM operation after the GC the attempt to allocate the metadata
// should succeed.  If the GC did not free enough space for the metaspace
// allocation, the HWM is increased so that another virtualspace will be
// allocated for the metadata.  With perm gen the increase in the perm
// gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
// metaspace policy uses those as the small and large steps for the HWM.
//
// After the GC the compute_new_size() for MetaspaceGC is called to
// resize the capacity of the metaspaces.  The current implementation
// is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
// to resize the Java heap by some GC's.  New flags can be implemented
// if really needed.  MinMetaspaceFreeRatio is used to calculate how much
// free space is desirable in the metaspace capacity to decide how much
// to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
// free space is desirable in the metaspace capacity before decreasing
// the HWM.

// Calculate the amount to increase the high water mark (HWM).
// Increase by a minimum amount (MinMetaspaceExpansion) so that
// another expansion is not requested too soon.  If that is not
// enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
// If that is still not enough, expand by the size of the allocation
// plus some.
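// For illustration, if MinMetaspaceExpansion were 256K and
// MaxMetaspaceExpansion were 4M, a 64K request would raise the HWM by 256K,
// a 1M request by 4M, and an 8M request by 8M + 256K (all rounded up to the
// commit alignment).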
size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
  size_t min_delta = MinMetaspaceExpansion;
  size_t max_delta = MaxMetaspaceExpansion;
  size_t delta = align_size_up(bytes, Metaspace::commit_alignment());

  if (delta <= min_delta) {
    delta = min_delta;
  } else if (delta <= max_delta) {
    // Don't want to hit the high water mark on the next
    // allocation so make the delta greater than just enough
    // for this allocation.
    delta = max_delta;
  } else {
    // This allocation is large but the next ones are probably not
    // so increase by the minimum.
    delta = delta + min_delta;
  }

  assert_is_size_aligned(delta, Metaspace::commit_alignment());

  return delta;
}

size_t MetaspaceGC::capacity_until_GC() {
  size_t value = (size_t)OrderAccess::load_ptr_acquire(&_capacity_until_GC);
  assert(value >= MetaspaceSize, "Not initialized properly?");
  return value;
}

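// Both the increment and decrement below return the updated value of
// _capacity_until_GC (Atomic::add_ptr returns the new value).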
size_t MetaspaceGC::inc_capacity_until_GC(size_t v) {
  assert_is_size_aligned(v, Metaspace::commit_alignment());

  return (size_t)Atomic::add_ptr(v, &_capacity_until_GC);
}

size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
  assert_is_size_aligned(v, Metaspace::commit_alignment());

  return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC);
}

bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
  // Check if the compressed class space is full.
  if (is_class && Metaspace::using_class_space()) {
    size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
    if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
      return false;
    }
  }

  // Check if the user has imposed a limit on the metaspace memory.
  size_t committed_bytes = MetaspaceAux::committed_bytes();
  if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
    return false;
  }

  return true;
}

size_t MetaspaceGC::allowed_expansion() {
  size_t committed_bytes = MetaspaceAux::committed_bytes();

  size_t left_until_max  = MaxMetaspaceSize - committed_bytes;

  // Always grant expansion if we are initializing the JVM,
  // or if the GC_locker is preventing GCs.
  if (!is_init_completed() || GC_locker::is_active_and_needs_gc()) {
    return left_until_max / BytesPerWord;
  }

  size_t capacity_until_gc = capacity_until_GC();

  if (capacity_until_gc <= committed_bytes) {
    return 0;
  }

  size_t left_until_GC = capacity_until_gc - committed_bytes;
  size_t left_to_commit = MIN2(left_until_GC, left_until_max);

  return left_to_commit / BytesPerWord;
}

void MetaspaceGC::compute_new_size() {
  assert(_shrink_factor <= 100, "invalid shrink factor");
  uint current_shrink_factor = _shrink_factor;
  _shrink_factor = 0;

  const size_t used_after_gc = MetaspaceAux::capacity_bytes();
  const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();

  const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;

  const double min_tmp = used_after_gc / maximum_used_percentage;
  size_t minimum_desired_capacity =
    (size_t)MIN2(min_tmp, double(max_uintx));
  // Don't shrink below the initial metaspace size
1460  minimum_desired_capacity = MAX2(minimum_desired_capacity,
1461                                  MetaspaceSize);
1462
1463  if (PrintGCDetails && Verbose) {
1464    gclog_or_tty->print_cr("\nMetaspaceGC::compute_new_size: ");
1465    gclog_or_tty->print_cr("  "
1466                  "  minimum_free_percentage: %6.2f"
1467                  "  maximum_used_percentage: %6.2f",
1468                  minimum_free_percentage,
1469                  maximum_used_percentage);
1470    gclog_or_tty->print_cr("  "
1471                  "   used_after_gc       : %6.1fKB",
1472                  used_after_gc / (double) K);
1473  }
1474
1475
1476  size_t shrink_bytes = 0;
1477  if (capacity_until_GC < minimum_desired_capacity) {
1478    // If we have less capacity below the metaspace HWM, then
1479    // increment the HWM.
1480    size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
1481    expand_bytes = align_size_up(expand_bytes, Metaspace::commit_alignment());
1482    // Don't expand unless it's significant
1483    if (expand_bytes >= MinMetaspaceExpansion) {
1484      size_t new_capacity_until_GC = MetaspaceGC::inc_capacity_until_GC(expand_bytes);
1485      Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
1486                                               new_capacity_until_GC,
1487                                               MetaspaceGCThresholdUpdater::ComputeNewSize);
1488      if (PrintGCDetails && Verbose) {
1489        gclog_or_tty->print_cr("    expanding:"
1490                      "  minimum_desired_capacity: %6.1fKB"
1491                      "  expand_bytes: %6.1fKB"
1492                      "  MinMetaspaceExpansion: %6.1fKB"
1493                      "  new metaspace HWM:  %6.1fKB",
1494                      minimum_desired_capacity / (double) K,
1495                      expand_bytes / (double) K,
1496                      MinMetaspaceExpansion / (double) K,
1497                      new_capacity_until_GC / (double) K);
1498      }
1499    }
1500    return;
1501  }
1502
1503  // No expansion, now see if we want to shrink
1504  // We would never want to shrink more than this
1505  assert(capacity_until_GC >= minimum_desired_capacity,
1506         err_msg(SIZE_FORMAT " >= " SIZE_FORMAT,
1507                 capacity_until_GC, minimum_desired_capacity));
1508  size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
1509
1510  // Should shrinking be considered?
1511  if (MaxMetaspaceFreeRatio < 100) {
1512    const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
1513    const double minimum_used_percentage = 1.0 - maximum_free_percentage;
1514    const double max_tmp = used_after_gc / minimum_used_percentage;
1515    size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
1516    maximum_desired_capacity = MAX2(maximum_desired_capacity,
1517                                    MetaspaceSize);
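    // Worked example (illustrative numbers): with MaxMetaspaceFreeRatio=70
    // (the default) and used_after_gc = 60M, minimum_used_percentage = 0.30,
    // so maximum_desired_capacity = 60M / 0.30 = 200M; only capacity beyond
    // 200M is considered for shrinking.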
1518    if (PrintGCDetails && Verbose) {
1519      gclog_or_tty->print_cr("  "
1520                             "  maximum_free_percentage: %6.2f"
1521                             "  minimum_used_percentage: %6.2f",
1522                             maximum_free_percentage,
1523                             minimum_used_percentage);
1524      gclog_or_tty->print_cr("  "
1525                             "  minimum_desired_capacity: %6.1fKB"
1526                             "  maximum_desired_capacity: %6.1fKB",
1527                             minimum_desired_capacity / (double) K,
1528                             maximum_desired_capacity / (double) K);
1529    }
1530
1531    assert(minimum_desired_capacity <= maximum_desired_capacity,
1532           "sanity check");
1533
1534    if (capacity_until_GC > maximum_desired_capacity) {
1535      // Capacity too large, compute shrinking size
1536      shrink_bytes = capacity_until_GC - maximum_desired_capacity;
1537      // We don't want to shrink all the way back to initSize if people call
1538      // System.gc(), because some programs do that between "phases" and then
1539      // we'd just have to grow the heap up again for the next phase.  So we
1540      // damp the shrinking: 0% on the first call, 10% on the second call, 40%
1541      // on the third call, and 100% by the fourth call.  But if we recompute
1542      // size without shrinking, it goes back to 0%.
1543      shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
1544
1545      shrink_bytes = align_size_down(shrink_bytes, Metaspace::commit_alignment());
1546
1547      assert(shrink_bytes <= max_shrink_bytes,
1548        err_msg("invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
1549          shrink_bytes, max_shrink_bytes));
1550      if (current_shrink_factor == 0) {
1551        _shrink_factor = 10;
1552      } else {
1553        _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
1554      }
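      // Example of the damping above: for back-to-back recomputations that
      // each find excess capacity, current_shrink_factor steps 0 -> 10 ->
      // 40 -> 100, so nothing is shrunk on the first call, 10% of the
      // excess on the second, 40% on the third, and all of it on the fourth.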
1555      if (PrintGCDetails && Verbose) {
1556        gclog_or_tty->print_cr("  "
1557                      "  shrinking:"
1558                      "  initSize: %.1fK"
1559                      "  maximum_desired_capacity: %.1fK",
1560                      MetaspaceSize / (double) K,
1561                      maximum_desired_capacity / (double) K);
1562        gclog_or_tty->print_cr("  "
1563                      "  shrink_bytes: %.1fK"
1564                      "  current_shrink_factor: %d"
1565                      "  new shrink factor: %d"
1566                      "  MinMetaspaceExpansion: %.1fK",
1567                      shrink_bytes / (double) K,
1568                      current_shrink_factor,
1569                      _shrink_factor,
1570                      MinMetaspaceExpansion / (double) K);
1571      }
1572    }
1573  }
1574
1575  // Don't shrink unless it's significant
1576  if (shrink_bytes >= MinMetaspaceExpansion &&
1577      ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
1578    size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
1579    Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
1580                                             new_capacity_until_GC,
1581                                             MetaspaceGCThresholdUpdater::ComputeNewSize);
1582  }
1583}
1584
1585// Metadebug methods
1586
1587void Metadebug::init_allocation_fail_alot_count() {
1588  if (MetadataAllocationFailALot) {
1589    _allocation_fail_alot_count =
1590      1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0));
1591  }
1592}
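// Example (illustrative): with MetadataAllocationFailALotInterval=1000, the
// expression above yields a pseudo-random count in [1, 1000]; each
// test_metadata_failure() call decrements it, and when it reaches zero an
// allocation failure is injected and the counter is re-randomized.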
1593
1594#ifdef ASSERT
1595bool Metadebug::test_metadata_failure() {
1596  if (MetadataAllocationFailALot &&
1597      Threads::is_vm_complete()) {
1598    if (_allocation_fail_alot_count > 0) {
1599      _allocation_fail_alot_count--;
1600    } else {
1601      if (TraceMetadataChunkAllocation && Verbose) {
1602        gclog_or_tty->print_cr("Metadata allocation failing for "
1603                               "MetadataAllocationFailALot");
1604      }
1605      init_allocation_fail_alot_count();
1606      return true;
1607    }
1608  }
1609  return false;
1610}
1611#endif
1612
1613// ChunkManager methods
1614
1615size_t ChunkManager::free_chunks_total_words() {
1616  return _free_chunks_total;
1617}
1618
1619size_t ChunkManager::free_chunks_total_bytes() {
1620  return free_chunks_total_words() * BytesPerWord;
1621}
1622
1623size_t ChunkManager::free_chunks_count() {
1624#ifdef ASSERT
1625  if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
1626    MutexLockerEx cl(SpaceManager::expand_lock(),
1627                     Mutex::_no_safepoint_check_flag);
1628    // This lock is only needed in debug because the verification
1629    // of the _free_chunks_totals walks the list of free chunks
1630    slow_locked_verify_free_chunks_count();
1631  }
1632#endif
1633  return _free_chunks_count;
1634}
1635
1636void ChunkManager::locked_verify_free_chunks_total() {
1637  assert_lock_strong(SpaceManager::expand_lock());
1638  assert(sum_free_chunks() == _free_chunks_total,
1639    err_msg("_free_chunks_total " SIZE_FORMAT " is not the"
1640           " same as sum " SIZE_FORMAT, _free_chunks_total,
1641           sum_free_chunks()));
1642}
1643
1644void ChunkManager::verify_free_chunks_total() {
1645  MutexLockerEx cl(SpaceManager::expand_lock(),
1646                     Mutex::_no_safepoint_check_flag);
1647  locked_verify_free_chunks_total();
1648}
1649
1650void ChunkManager::locked_verify_free_chunks_count() {
1651  assert_lock_strong(SpaceManager::expand_lock());
1652  assert(sum_free_chunks_count() == _free_chunks_count,
1653    err_msg("_free_chunks_count " SIZE_FORMAT " is not the"
1654           " same as sum " SIZE_FORMAT, _free_chunks_count,
1655           sum_free_chunks_count()));
1656}
1657
1658void ChunkManager::verify_free_chunks_count() {
1659#ifdef ASSERT
1660  MutexLockerEx cl(SpaceManager::expand_lock(),
1661                     Mutex::_no_safepoint_check_flag);
1662  locked_verify_free_chunks_count();
1663#endif
1664}
1665
1666void ChunkManager::verify() {
1667  MutexLockerEx cl(SpaceManager::expand_lock(),
1668                     Mutex::_no_safepoint_check_flag);
1669  locked_verify();
1670}
1671
1672void ChunkManager::locked_verify() {
1673  locked_verify_free_chunks_count();
1674  locked_verify_free_chunks_total();
1675}
1676
1677void ChunkManager::locked_print_free_chunks(outputStream* st) {
1678  assert_lock_strong(SpaceManager::expand_lock());
1679  st->print_cr("Free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
1680                _free_chunks_total, _free_chunks_count);
1681}
1682
1683void ChunkManager::locked_print_sum_free_chunks(outputStream* st) {
1684  assert_lock_strong(SpaceManager::expand_lock());
1685  st->print_cr("Sum free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
1686                sum_free_chunks(), sum_free_chunks_count());
1687}
1688ChunkList* ChunkManager::free_chunks(ChunkIndex index) {
1689  return &_free_chunks[index];
1690}
1691
1692// These methods that sum the free chunk lists are used in printing
1693// methods that are used in product builds.
1694size_t ChunkManager::sum_free_chunks() {
1695  assert_lock_strong(SpaceManager::expand_lock());
1696  size_t result = 0;
1697  for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1698    ChunkList* list = free_chunks(i);
1699
1700    if (list == NULL) {
1701      continue;
1702    }
1703
1704    result = result + list->count() * list->size();
1705  }
1706  result = result + humongous_dictionary()->total_size();
1707  return result;
1708}
1709
1710size_t ChunkManager::sum_free_chunks_count() {
1711  assert_lock_strong(SpaceManager::expand_lock());
1712  size_t count = 0;
1713  for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1714    ChunkList* list = free_chunks(i);
1715    if (list == NULL) {
1716      continue;
1717    }
1718    count = count + list->count();
1719  }
1720  count = count + humongous_dictionary()->total_free_blocks();
1721  return count;
1722}
1723
1724ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
1725  ChunkIndex index = list_index(word_size);
1726  assert(index < HumongousIndex, "No humongous list");
1727  return free_chunks(index);
1728}
1729
1730Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
1731  assert_lock_strong(SpaceManager::expand_lock());
1732
1733  slow_locked_verify();
1734
1735  Metachunk* chunk = NULL;
1736  if (list_index(word_size) != HumongousIndex) {
1737    ChunkList* free_list = find_free_chunks_list(word_size);
1738    assert(free_list != NULL, "Sanity check");
1739
1740    chunk = free_list->head();
1741
1742    if (chunk == NULL) {
1743      return NULL;
1744    }
1745
1746    // Remove the chunk as the head of the list.
1747    free_list->remove_chunk(chunk);
1748
1749    if (TraceMetadataChunkAllocation && Verbose) {
1750      gclog_or_tty->print_cr("ChunkManager::free_chunks_get: free_list "
1751                             PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
1752                             free_list, chunk, chunk->word_size());
1753    }
1754  } else {
1755    chunk = humongous_dictionary()->get_chunk(
1756      word_size,
1757      FreeBlockDictionary<Metachunk>::atLeast);
1758
1759    if (chunk == NULL) {
1760      return NULL;
1761    }
1762
1763    if (TraceMetadataHumongousAllocation) {
1764      size_t waste = chunk->word_size() - word_size;
1765      gclog_or_tty->print_cr("Free list allocate humongous chunk size "
1766                             SIZE_FORMAT " for requested size " SIZE_FORMAT
1767                             " waste " SIZE_FORMAT,
1768                             chunk->word_size(), word_size, waste);
1769    }
1770  }
1771
1772  // Chunk is being removed from the chunks free list.
1773  dec_free_chunks_total(chunk->word_size());
1774
1775  // Remove it from the links to this freelist
1776  chunk->set_next(NULL);
1777  chunk->set_prev(NULL);
1778#ifdef ASSERT
1779  // Chunk is no longer on any freelist. Setting to false makes container_count_slow()
1780  // work.
1781  chunk->set_is_tagged_free(false);
1782#endif
1783  chunk->container()->inc_container_count();
1784
1785  slow_locked_verify();
1786  return chunk;
1787}
1788
1789Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
1790  assert_lock_strong(SpaceManager::expand_lock());
1791  slow_locked_verify();
1792
1793  // Take from the beginning of the list
1794  Metachunk* chunk = free_chunks_get(word_size);
1795  if (chunk == NULL) {
1796    return NULL;
1797  }
1798
1799  assert((word_size <= chunk->word_size()) ||
1800         (list_index(chunk->word_size()) == HumongousIndex),
1801         "Non-humongous variable sized chunk");
1802  if (TraceMetadataChunkAllocation) {
1803    size_t list_count;
1804    if (list_index(word_size) < HumongousIndex) {
1805      ChunkList* list = find_free_chunks_list(word_size);
1806      list_count = list->count();
1807    } else {
1808      list_count = humongous_dictionary()->total_count();
1809    }
1810    gclog_or_tty->print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk "
1811                        PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",
1812                        this, chunk, chunk->word_size(), list_count);
1813    locked_print_free_chunks(gclog_or_tty);
1814  }
1815
1816  return chunk;
1817}
1818
1819void ChunkManager::print_on(outputStream* out) const {
1820  if (PrintFLSStatistics != 0) {
1821    const_cast<ChunkManager *>(this)->humongous_dictionary()->report_statistics();
1822  }
1823}
1824
1825// SpaceManager methods
1826
1827void SpaceManager::get_initial_chunk_sizes(Metaspace::MetaspaceType type,
1828                                           size_t* chunk_word_size,
1829                                           size_t* class_chunk_word_size) {
1830  switch (type) {
1831  case Metaspace::BootMetaspaceType:
1832    *chunk_word_size = Metaspace::first_chunk_word_size();
1833    *class_chunk_word_size = Metaspace::first_class_chunk_word_size();
1834    break;
1835  case Metaspace::ROMetaspaceType:
1836    *chunk_word_size = SharedReadOnlySize / wordSize;
1837    *class_chunk_word_size = ClassSpecializedChunk;
1838    break;
1839  case Metaspace::ReadWriteMetaspaceType:
1840    *chunk_word_size = SharedReadWriteSize / wordSize;
1841    *class_chunk_word_size = ClassSpecializedChunk;
1842    break;
1843  case Metaspace::AnonymousMetaspaceType:
1844  case Metaspace::ReflectionMetaspaceType:
1845    *chunk_word_size = SpecializedChunk;
1846    *class_chunk_word_size = ClassSpecializedChunk;
1847    break;
1848  default:
1849    *chunk_word_size = SmallChunk;
1850    *class_chunk_word_size = ClassSmallChunk;
1851    break;
1852  }
1853  assert(*chunk_word_size != 0 && *class_chunk_word_size != 0,
1854    err_msg("Initial chunks sizes bad: data  " SIZE_FORMAT
1855            " class " SIZE_FORMAT,
1856            *chunk_word_size, *class_chunk_word_size));
1857}
1858
1859size_t SpaceManager::sum_free_in_chunks_in_use() const {
1860  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1861  size_t free = 0;
1862  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1863    Metachunk* chunk = chunks_in_use(i);
1864    while (chunk != NULL) {
1865      free += chunk->free_word_size();
1866      chunk = chunk->next();
1867    }
1868  }
1869  return free;
1870}
1871
1872size_t SpaceManager::sum_waste_in_chunks_in_use() const {
1873  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1874  size_t result = 0;
1875  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1876   result += sum_waste_in_chunks_in_use(i);
1877  }
1878
1879  return result;
1880}
1881
1882size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
1883  size_t result = 0;
1884  Metachunk* chunk = chunks_in_use(index);
1885  // Count the free space in all the chunks but not the
1886  // current chunk from which allocations are still being done.
1887  while (chunk != NULL) {
1888    if (chunk != current_chunk()) {
1889      result += chunk->free_word_size();
1890    }
1891    chunk = chunk->next();
1892  }
1893  return result;
1894}
1895
1896size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
1897  // For CMS use "allocated_chunks_words()" which does not need the
1898  // Metaspace lock.  For the other collectors sum over the
1899  // chunks-in-use lists.  Use both methods as a check that
1900  // "allocated_chunks_words()" is correct.  That is,
1901  // sum_capacity_in_chunks_in_use() is too expensive to use in the
1902  // product, so allocated_chunks_words() should be used, but we still
1903  // allow checking that it returns the same value as
1904  // sum_capacity_in_chunks_in_use(), which is the definitive answer.
1905  if (UseConcMarkSweepGC) {
1906    return allocated_chunks_words();
1907  } else {
1908    MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1909    size_t sum = 0;
1910    for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1911      Metachunk* chunk = chunks_in_use(i);
1912      while (chunk != NULL) {
1913        sum += chunk->word_size();
1914        chunk = chunk->next();
1915      }
1916    }
1917    return sum;
1918  }
1919}
1920
1921size_t SpaceManager::sum_count_in_chunks_in_use() {
1922  size_t count = 0;
1923  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1924    count = count + sum_count_in_chunks_in_use(i);
1925  }
1926
1927  return count;
1928}
1929
1930size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
1931  size_t count = 0;
1932  Metachunk* chunk = chunks_in_use(i);
1933  while (chunk != NULL) {
1934    count++;
1935    chunk = chunk->next();
1936  }
1937  return count;
1938}
1939
1940
1941size_t SpaceManager::sum_used_in_chunks_in_use() const {
1942  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1943  size_t used = 0;
1944  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1945    Metachunk* chunk = chunks_in_use(i);
1946    while (chunk != NULL) {
1947      used += chunk->used_word_size();
1948      chunk = chunk->next();
1949    }
1950  }
1951  return used;
1952}
1953
1954void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
1955
1956  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1957    Metachunk* chunk = chunks_in_use(i);
1958    st->print("SpaceManager: %s " PTR_FORMAT,
1959                 chunk_size_name(i), chunk);
1960    if (chunk != NULL) {
1961      st->print_cr(" free " SIZE_FORMAT,
1962                   chunk->free_word_size());
1963    } else {
1964      st->print_cr("");
1965    }
1966  }
1967
1968  chunk_manager()->locked_print_free_chunks(st);
1969  chunk_manager()->locked_print_sum_free_chunks(st);
1970}
1971
1972size_t SpaceManager::calc_chunk_size(size_t word_size) {
1973
1974  // Decide between a small chunk and a medium chunk.  Up to
1975  // _small_chunk_limit small chunks can be allocated but
1976  // once a medium chunk has been allocated, no more small
1977  // chunks will be allocated.
1978  size_t chunk_word_size;
1979  if (chunks_in_use(MediumIndex) == NULL &&
1980      sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) {
1981    chunk_word_size = (size_t) small_chunk_size();
1982    if (word_size + Metachunk::overhead() > small_chunk_size()) {
1983      chunk_word_size = medium_chunk_size();
1984    }
1985  } else {
1986    chunk_word_size = medium_chunk_size();
1987  }
1988
1989  // Might still need a humongous chunk.  Enforce
1990  // humongous allocations sizes to be aligned up to
1991  // the smallest chunk size.
1992  size_t if_humongous_sized_chunk =
1993    align_size_up(word_size + Metachunk::overhead(),
1994                  smallest_chunk_size());
1995  chunk_word_size =
1996    MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
1997
1998  assert(!SpaceManager::is_humongous(word_size) ||
1999         chunk_word_size == if_humongous_sized_chunk,
2000         err_msg("Size calculation is wrong, word_size " SIZE_FORMAT
2001                 " chunk_word_size " SIZE_FORMAT,
2002                 word_size, chunk_word_size));
2003  if (TraceMetadataHumongousAllocation &&
2004      SpaceManager::is_humongous(word_size)) {
2005    gclog_or_tty->print_cr("Metadata humongous allocation:");
2006    gclog_or_tty->print_cr("  word_size " PTR_FORMAT, word_size);
2007    gclog_or_tty->print_cr("  chunk_word_size " PTR_FORMAT,
2008                           chunk_word_size);
2009    gclog_or_tty->print_cr("    chunk overhead " PTR_FORMAT,
2010                           Metachunk::overhead());
2011  }
2012  return chunk_word_size;
2013}
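// Worked example for calc_chunk_size() (illustrative, non-class space
// manager): a 300-word request, with no medium chunk in use and fewer than
// _small_chunk_limit small chunks allocated, gets a small chunk, assuming
// 300 + Metachunk::overhead() still fits in small_chunk_size(); a request
// that does not fit is bumped to a medium chunk; and a request larger than
// medium_chunk_size() becomes a humongous chunk of
// align_size_up(word_size + overhead, smallest_chunk_size()) words.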
2014
2015void SpaceManager::track_metaspace_memory_usage() {
2016  if (is_init_completed()) {
2017    if (is_class()) {
2018      MemoryService::track_compressed_class_memory_usage();
2019    }
2020    MemoryService::track_metaspace_memory_usage();
2021  }
2022}
2023
2024MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
2025  assert(vs_list()->current_virtual_space() != NULL,
2026         "Should have been set");
2027  assert(current_chunk() == NULL ||
2028         current_chunk()->allocate(word_size) == NULL,
2029         "Don't need to expand");
2030  MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
2031
2032  if (TraceMetadataChunkAllocation && Verbose) {
2033    size_t words_left = 0;
2034    size_t words_used = 0;
2035    if (current_chunk() != NULL) {
2036      words_left = current_chunk()->free_word_size();
2037      words_used = current_chunk()->used_word_size();
2038    }
2039    gclog_or_tty->print_cr("SpaceManager::grow_and_allocate for " SIZE_FORMAT
2040                           " words " SIZE_FORMAT " words used " SIZE_FORMAT
2041                           " words left",
2042                            word_size, words_used, words_left);
2043  }
2044
2045  // Get another chunk out of the virtual space
2046  size_t grow_chunks_by_words = calc_chunk_size(word_size);
2047  Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words);
2048
2049  MetaWord* mem = NULL;
2050
2051  // If a chunk was available, add it to the in-use chunk list
2052  // and do an allocation from it.
2053  if (next != NULL) {
2054    // Add to this manager's list of chunks in use.
2055    add_chunk(next, false);
2056    mem = next->allocate(word_size);
2057  }
2058
2059  // Track metaspace memory usage statistic.
2060  track_metaspace_memory_usage();
2061
2062  return mem;
2063}
2064
2065void SpaceManager::print_on(outputStream* st) const {
2066
2067  for (ChunkIndex i = ZeroIndex;
2068       i < NumberOfInUseLists ;
2069       i = next_chunk_index(i) ) {
2070    st->print_cr("  chunks_in_use " PTR_FORMAT " chunk size " PTR_FORMAT,
2071                 chunks_in_use(i),
2072                 chunks_in_use(i) == NULL ? 0 : chunks_in_use(i)->word_size());
2073  }
2074  st->print_cr("    waste:  Small " SIZE_FORMAT " Medium " SIZE_FORMAT
2075               " Humongous " SIZE_FORMAT,
2076               sum_waste_in_chunks_in_use(SmallIndex),
2077               sum_waste_in_chunks_in_use(MediumIndex),
2078               sum_waste_in_chunks_in_use(HumongousIndex));
2079  // block free lists
2080  if (block_freelists() != NULL) {
2081    st->print_cr("total in block free lists " SIZE_FORMAT,
2082      block_freelists()->total_size());
2083  }
2084}
2085
2086SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
2087                           Mutex* lock) :
2088  _mdtype(mdtype),
2089  _allocated_blocks_words(0),
2090  _allocated_chunks_words(0),
2091  _allocated_chunks_count(0),
2092  _lock(lock)
2093{
2094  initialize();
2095}
2096
2097void SpaceManager::inc_size_metrics(size_t words) {
2098  assert_lock_strong(SpaceManager::expand_lock());
2099  // Running total of the words in allocated Metachunks and of the
2100  // allocated Metachunk count for this SpaceManager
2101  _allocated_chunks_words = _allocated_chunks_words + words;
2102  _allocated_chunks_count++;
2103  // Global total of capacity in allocated Metachunks
2104  MetaspaceAux::inc_capacity(mdtype(), words);
2105  // Global total of allocated Metablocks.
2106  // used_words_slow() includes the overhead in each
2107  // Metachunk so include it in the used when the
2108  // Metachunk is first added (so only added once per
2109  // Metachunk).
2110  MetaspaceAux::inc_used(mdtype(), Metachunk::overhead());
2111}
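// Example (illustrative sizes): adding a 512-word chunk whose
// Metachunk::overhead() is, say, 8 words raises this manager's
// _allocated_chunks_words by 512 and the global capacity by 512 words, and
// charges the 8 overhead words to the global used total once, up front;
// the payload words are charged later, via inc_used_metrics(), as blocks
// are actually allocated from the chunk.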
2112
2113void SpaceManager::inc_used_metrics(size_t words) {
2114  // Add to the per SpaceManager total
2115  Atomic::add_ptr(words, &_allocated_blocks_words);
2116  // Add to the global total
2117  MetaspaceAux::inc_used(mdtype(), words);
2118}
2119
2120void SpaceManager::dec_total_from_size_metrics() {
2121  MetaspaceAux::dec_capacity(mdtype(), allocated_chunks_words());
2122  MetaspaceAux::dec_used(mdtype(), allocated_blocks_words());
2123  // Also deduct the overhead per Metachunk
2124  MetaspaceAux::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead());
2125}
2126
2127void SpaceManager::initialize() {
2128  Metadebug::init_allocation_fail_alot_count();
2129  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2130    _chunks_in_use[i] = NULL;
2131  }
2132  _current_chunk = NULL;
2133  if (TraceMetadataChunkAllocation && Verbose) {
2134    gclog_or_tty->print_cr("SpaceManager(): " PTR_FORMAT, this);
2135  }
2136}
2137
2138void ChunkManager::return_chunks(ChunkIndex index, Metachunk* chunks) {
2139  if (chunks == NULL) {
2140    return;
2141  }
2142  ChunkList* list = free_chunks(index);
2143  assert(list->size() == chunks->word_size(), "Mismatch in chunk sizes");
2144  assert_lock_strong(SpaceManager::expand_lock());
2145  Metachunk* cur = chunks;
2146
2147  // This returns chunks one at a time.  If a new
2148  // class List can be created that is a base class
2149  // of FreeList then something like FreeList::prepend()
2150  // can be used in place of this loop
2151  while (cur != NULL) {
2152    assert(cur->container() != NULL, "Container should have been set");
2153    cur->container()->dec_container_count();
2154    // Capture the next link before it is changed
2155    // by the call to return_chunk_at_head();
2156    Metachunk* next = cur->next();
2157    DEBUG_ONLY(cur->set_is_tagged_free(true);)
2158    list->return_chunk_at_head(cur);
2159    cur = next;
2160  }
2161}
2162
2163SpaceManager::~SpaceManager() {
2164  // This takes this->_lock, which cannot be done while holding the expand_lock()
2165  assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
2166    err_msg("sum_capacity_in_chunks_in_use() " SIZE_FORMAT
2167            " allocated_chunks_words() " SIZE_FORMAT,
2168            sum_capacity_in_chunks_in_use(), allocated_chunks_words()));
2169
2170  MutexLockerEx fcl(SpaceManager::expand_lock(),
2171                    Mutex::_no_safepoint_check_flag);
2172
2173  chunk_manager()->slow_locked_verify();
2174
2175  dec_total_from_size_metrics();
2176
2177  if (TraceMetadataChunkAllocation && Verbose) {
2178    gclog_or_tty->print_cr("~SpaceManager(): " PTR_FORMAT, this);
2179    locked_print_chunks_in_use_on(gclog_or_tty);
2180  }
2181
2182  // Do not mangle freed Metachunks.  The chunk size inside Metachunks
2183  // is used during the freeing of VirtualSpaceNodes.
2184
2185  // Have to update before the chunks_in_use lists are emptied
2186  // below.
2187  chunk_manager()->inc_free_chunks_total(allocated_chunks_words(),
2188                                         sum_count_in_chunks_in_use());
2189
2190  // Add all the chunks in use by this space manager
2191  // to the global list of free chunks.
2192
2193  // Follow each list of chunks-in-use and add them to the
2194  // free lists.  Each list is NULL terminated.
2195
2196  for (ChunkIndex i = ZeroIndex; i < HumongousIndex; i = next_chunk_index(i)) {
2197    if (TraceMetadataChunkAllocation && Verbose) {
2198      gclog_or_tty->print_cr("returned %d %s chunks to freelist",
2199                             sum_count_in_chunks_in_use(i),
2200                             chunk_size_name(i));
2201    }
2202    Metachunk* chunks = chunks_in_use(i);
2203    chunk_manager()->return_chunks(i, chunks);
2204    set_chunks_in_use(i, NULL);
2205    if (TraceMetadataChunkAllocation && Verbose) {
2206      gclog_or_tty->print_cr("updated freelist count %d %s",
2207                             chunk_manager()->free_chunks(i)->count(),
2208                             chunk_size_name(i));
2209    }
2210    assert(i != HumongousIndex, "Humongous chunks are handled explicitly later");
2211  }
2212
2213  // The medium chunk case may be optimized by passing the head and
2214  // tail of the medium chunk list to add_at_head().  The tail is often
2215  // the current chunk but there are probably exceptions.
2216
2217  // Humongous chunks
2218  if (TraceMetadataChunkAllocation && Verbose) {
2219    gclog_or_tty->print_cr("returned %d %s humongous chunks to dictionary",
2220                            sum_count_in_chunks_in_use(HumongousIndex),
2221                            chunk_size_name(HumongousIndex));
2222    gclog_or_tty->print("Humongous chunk dictionary: ");
2223  }
2224  // Humongous chunks are never the current chunk.
2225  Metachunk* humongous_chunks = chunks_in_use(HumongousIndex);
2226
2227  while (humongous_chunks != NULL) {
2228#ifdef ASSERT
2229    humongous_chunks->set_is_tagged_free(true);
2230#endif
2231    if (TraceMetadataChunkAllocation && Verbose) {
2232      gclog_or_tty->print(PTR_FORMAT " (" SIZE_FORMAT ") ",
2233                          humongous_chunks,
2234                          humongous_chunks->word_size());
2235    }
2236    assert(humongous_chunks->word_size() == (size_t)
2237           align_size_up(humongous_chunks->word_size(),
2238                             smallest_chunk_size()),
2239           err_msg("Humongous chunk size is wrong: word size " SIZE_FORMAT
2240                   " granularity " SIZE_FORMAT,
2241                   humongous_chunks->word_size(), smallest_chunk_size()));
2242    Metachunk* next_humongous_chunks = humongous_chunks->next();
2243    humongous_chunks->container()->dec_container_count();
2244    chunk_manager()->humongous_dictionary()->return_chunk(humongous_chunks);
2245    humongous_chunks = next_humongous_chunks;
2246  }
2247  if (TraceMetadataChunkAllocation && Verbose) {
2248    gclog_or_tty->print_cr("");
2249    gclog_or_tty->print_cr("updated dictionary count %d %s",
2250                     chunk_manager()->humongous_dictionary()->total_count(),
2251                     chunk_size_name(HumongousIndex));
2252  }
2253  chunk_manager()->slow_locked_verify();
2254}
2255
2256const char* SpaceManager::chunk_size_name(ChunkIndex index) const {
2257  switch (index) {
2258    case SpecializedIndex:
2259      return "Specialized";
2260    case SmallIndex:
2261      return "Small";
2262    case MediumIndex:
2263      return "Medium";
2264    case HumongousIndex:
2265      return "Humongous";
2266    default:
2267      return NULL;
2268  }
2269}
2270
2271ChunkIndex ChunkManager::list_index(size_t size) {
2272  switch (size) {
2273    case SpecializedChunk:
2274      assert(SpecializedChunk == ClassSpecializedChunk,
2275             "Need branch for ClassSpecializedChunk");
2276      return SpecializedIndex;
2277    case SmallChunk:
2278    case ClassSmallChunk:
2279      return SmallIndex;
2280    case MediumChunk:
2281    case ClassMediumChunk:
2282      return MediumIndex;
2283    default:
2284      assert(size > MediumChunk || size > ClassMediumChunk,
2285             "Not a humongous chunk");
2286      return HumongousIndex;
2287  }
2288}
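// Example: a chunk of exactly SpecializedChunk words maps to
// SpecializedIndex, SmallChunk or ClassSmallChunk to SmallIndex, and
// MediumChunk or ClassMediumChunk to MediumIndex; chunks are only ever
// carved in those fixed sizes or as humongous chunks, so any other size
// falls through to HumongousIndex.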
2289
2290void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
2291  assert_lock_strong(_lock);
2292  size_t raw_word_size = get_raw_word_size(word_size);
2293  size_t min_size = TreeChunk<Metablock, FreeList<Metablock> >::min_size();
2294  assert(raw_word_size >= min_size,
2295         err_msg("Should not deallocate dark matter " SIZE_FORMAT "<" SIZE_FORMAT, word_size, min_size));
2296  block_freelists()->return_block(p, raw_word_size);
2297}
2298
2299// Adds a chunk to the list of chunks in use.
2300void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
2301
2302  assert(new_chunk != NULL, "Should not be NULL");
2303  assert(new_chunk->next() == NULL, "Should not be on a list");
2304
2305  new_chunk->reset_empty();
2306
2307  // Find the correct list and set the current
2308  // chunk for that list.
2309  ChunkIndex index = ChunkManager::list_index(new_chunk->word_size());
2310
2311  if (index != HumongousIndex) {
2312    retire_current_chunk();
2313    set_current_chunk(new_chunk);
2314    new_chunk->set_next(chunks_in_use(index));
2315    set_chunks_in_use(index, new_chunk);
2316  } else {
2317    // For null class loader data and DumpSharedSpaces, the first chunk isn't
2318    // small, so the small chunk list will be null.  Link this first
2319    // chunk as the current chunk.
2320    if (make_current) {
2321      // Set as the current chunk but otherwise treat as a humongous chunk.
2322      set_current_chunk(new_chunk);
2323    }
2324    // Link at head.  Only the null class loader metaspace (class and data
2325    // virtual space managers) ever has a humongous _current_chunk, and new
2326    // chunks are linked at the head, so _current_chunk will never point to
2327    // the tail of the humongous chunks list.
2328    new_chunk->set_next(chunks_in_use(HumongousIndex));
2329    set_chunks_in_use(HumongousIndex, new_chunk);
2330
2331    assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
2332  }
2333
2334  // Add to the running sum of capacity
2335  inc_size_metrics(new_chunk->word_size());
2336
2337  assert(new_chunk->is_empty(), "Not ready for reuse");
2338  if (TraceMetadataChunkAllocation && Verbose) {
2339    gclog_or_tty->print("SpaceManager::add_chunk: %d) ",
2340                        sum_count_in_chunks_in_use());
2341    new_chunk->print_on(gclog_or_tty);
2342    chunk_manager()->locked_print_free_chunks(gclog_or_tty);
2343  }
2344}
2345
2346void SpaceManager::retire_current_chunk() {
2347  if (current_chunk() != NULL) {
2348    size_t remaining_words = current_chunk()->free_word_size();
2349    if (remaining_words >= TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
2350      block_freelists()->return_block(current_chunk()->allocate(remaining_words), remaining_words);
2351      inc_used_metrics(remaining_words);
2352    }
2353  }
2354}
2355
2356Metachunk* SpaceManager::get_new_chunk(size_t word_size,
2357                                       size_t grow_chunks_by_words) {
2358  // Get a chunk from the chunk freelist
2359  Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);
2360
2361  if (next == NULL) {
2362    next = vs_list()->get_new_chunk(word_size,
2363                                    grow_chunks_by_words,
2364                                    medium_chunk_bunch());
2365  }
2366
2367  if (TraceMetadataHumongousAllocation && next != NULL &&
2368      SpaceManager::is_humongous(next->word_size())) {
2369    gclog_or_tty->print_cr("  new humongous chunk word size "
2370                           SIZE_FORMAT, next->word_size());
2371  }
2372
2373  return next;
2374}
2375
2376MetaWord* SpaceManager::allocate(size_t word_size) {
2377  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2378
2379  size_t raw_word_size = get_raw_word_size(word_size);
2380  BlockFreelist* fl =  block_freelists();
2381  MetaWord* p = NULL;
2382  // Allocation from the dictionary is expensive in the sense that
2383  // the dictionary has to be searched for a size.  Don't allocate
2384  // from the dictionary until it starts to get fat.  Is this
2385  // a reasonable policy?  Maybe a skinny dictionary is fast enough
2386  // for allocations.  Do some profiling.  JJJ
2387  if (fl->total_size() > allocation_from_dictionary_limit) {
2388    p = fl->get_block(raw_word_size);
2389  }
2390  if (p == NULL) {
2391    p = allocate_work(raw_word_size);
2392  }
2393
2394  return p;
2395}
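// Sketch of the policy above: while a SpaceManager's block freelist holds
// no more than allocation_from_dictionary_limit worth of returned blocks,
// allocate() skips the dictionary search entirely and goes straight to
// allocate_work(); once enough deallocations accumulate, get_block() is
// tried first and allocate_work() becomes the fallback.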
2396
2397// Returns the address of space allocated for "word_size".
2398// This method does not know about blocks (Metablocks)
2399MetaWord* SpaceManager::allocate_work(size_t word_size) {
2400  assert_lock_strong(_lock);
2401#ifdef ASSERT
2402  if (Metadebug::test_metadata_failure()) {
2403    return NULL;
2404  }
2405#endif
2406  // Is there space in the current chunk?
2407  MetaWord* result = NULL;
2408
2409  // For DumpSharedSpaces, only allocate out of the current chunk which is
2410  // never null because we gave it the size we wanted.  Caller reports out
2411  // of memory if this returns null.
2412  if (DumpSharedSpaces) {
2413    assert(current_chunk() != NULL, "should never happen");
2414    inc_used_metrics(word_size);
2415    return current_chunk()->allocate(word_size); // caller handles null result
2416  }
2417
2418  if (current_chunk() != NULL) {
2419    result = current_chunk()->allocate(word_size);
2420  }
2421
2422  if (result == NULL) {
2423    result = grow_and_allocate(word_size);
2424  }
2425
2426  if (result != NULL) {
2427    inc_used_metrics(word_size);
2428    assert(result != (MetaWord*) chunks_in_use(MediumIndex),
2429           "Head of the list is being allocated");
2430  }
2431
2432  return result;
2433}
2434
2435// This function looks at the chunks in the metaspace without locking.
2436// The chunks are added with store ordering and not deleted except at
2437// unloading time.
2438bool SpaceManager::contains(const void *ptr) {
2439  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i))
2440  {
2441    Metachunk* curr = chunks_in_use(i);
2442    while (curr != NULL) {
2443      if (curr->contains(ptr)) return true;
2444      curr = curr->next();
2445    }
2446  }
2447  return false;
2448}
2449
2450void SpaceManager::verify() {
2451  // If there are blocks in the dictionary, then
2452  // verification of chunks does not work since
2453  // being in the dictionary alters a chunk.
2454  if (block_freelists()->total_size() == 0) {
2455    for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2456      Metachunk* curr = chunks_in_use(i);
2457      while (curr != NULL) {
2458        curr->verify();
2459        verify_chunk_size(curr);
2460        curr = curr->next();
2461      }
2462    }
2463  }
2464}
2465
2466void SpaceManager::verify_chunk_size(Metachunk* chunk) {
2467  assert(is_humongous(chunk->word_size()) ||
2468         chunk->word_size() == medium_chunk_size() ||
2469         chunk->word_size() == small_chunk_size() ||
2470         chunk->word_size() == specialized_chunk_size(),
2471         "Chunk size is wrong");
2472  return;
2473}
2474
2475#ifdef ASSERT
2476void SpaceManager::verify_allocated_blocks_words() {
2477  // Verification is only guaranteed at a safepoint.
2478  assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(),
2479    "Verification can fail if the applications is running");
2480  assert(allocated_blocks_words() == sum_used_in_chunks_in_use(),
2481    err_msg("allocation total is not consistent " SIZE_FORMAT
2482            " vs " SIZE_FORMAT,
2483            allocated_blocks_words(), sum_used_in_chunks_in_use()));
2484}
2485
2486#endif
2487
2488void SpaceManager::dump(outputStream* const out) const {
2489  size_t curr_total = 0;
2490  size_t waste = 0;
2491  uint i = 0;
2492  size_t used = 0;
2493  size_t capacity = 0;
2494
2495  // Add up statistics for all chunks in this SpaceManager.
2496  for (ChunkIndex index = ZeroIndex;
2497       index < NumberOfInUseLists;
2498       index = next_chunk_index(index)) {
2499    for (Metachunk* curr = chunks_in_use(index);
2500         curr != NULL;
2501         curr = curr->next()) {
2502      out->print("%d) ", i++);
2503      curr->print_on(out);
2504      curr_total += curr->word_size();
2505      used += curr->used_word_size();
2506      capacity += curr->word_size();
2507      waste += curr->free_word_size() + curr->overhead();
2508    }
2509  }
2510
2511  if (TraceMetadataChunkAllocation && Verbose) {
2512    block_freelists()->print_on(out);
2513  }
2514
2515  size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
2516  // Free space isn't wasted.
2517  waste -= free;
2518
2519  out->print_cr("total of all chunks "  SIZE_FORMAT " used " SIZE_FORMAT
2520                " free " SIZE_FORMAT " capacity " SIZE_FORMAT
2521                " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste);
2522}
2523
2524#ifndef PRODUCT
2525void SpaceManager::mangle_freed_chunks() {
2526  for (ChunkIndex index = ZeroIndex;
2527       index < NumberOfInUseLists;
2528       index = next_chunk_index(index)) {
2529    for (Metachunk* curr = chunks_in_use(index);
2530         curr != NULL;
2531         curr = curr->next()) {
2532      curr->mangle();
2533    }
2534  }
2535}
2536#endif // PRODUCT
2537
2538// MetaspaceAux
2539
2540
2541size_t MetaspaceAux::_capacity_words[] = {0, 0};
2542size_t MetaspaceAux::_used_words[] = {0, 0};
2543
2544size_t MetaspaceAux::free_bytes(Metaspace::MetadataType mdtype) {
2545  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2546  return list == NULL ? 0 : list->free_bytes();
2547}
2548
2549size_t MetaspaceAux::free_bytes() {
2550  return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType);
2551}
2552
2553void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
2554  assert_lock_strong(SpaceManager::expand_lock());
2555  assert(words <= capacity_words(mdtype),
2556    err_msg("About to decrement below 0: words " SIZE_FORMAT
2557            " is greater than _capacity_words[%u] " SIZE_FORMAT,
2558            words, mdtype, capacity_words(mdtype)));
2559  _capacity_words[mdtype] -= words;
2560}
2561
2562void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
2563  assert_lock_strong(SpaceManager::expand_lock());
2564  // Needs to be atomic
2565  _capacity_words[mdtype] += words;
2566}
2567
2568void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) {
2569  assert(words <= used_words(mdtype),
2570    err_msg("About to decrement below 0: words " SIZE_FORMAT
2571            " is greater than _used_words[%u] " SIZE_FORMAT,
2572            words, mdtype, used_words(mdtype)));
2573  // For CMS deallocation of the Metaspaces occurs during the
2574  // sweep which is a concurrent phase.  Protection by the expand_lock()
2575  // is not enough since allocation is on a per Metaspace basis
2576  // and protected by the Metaspace lock.
2577  jlong minus_words = (jlong) - (jlong) words;
2578  Atomic::add_ptr(minus_words, &_used_words[mdtype]);
2579}
2580
2581void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) {
2582  // _used_words tracks allocations for
2583  // each piece of metadata.  Those allocations are
2584  // generally done concurrently by different application
2585  // threads so must be done atomically.
2586  Atomic::add_ptr(words, &_used_words[mdtype]);
2587}
2588
2589size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) {
2590  size_t used = 0;
2591  ClassLoaderDataGraphMetaspaceIterator iter;
2592  while (iter.repeat()) {
2593    Metaspace* msp = iter.get_next();
2594    // Sum allocated_blocks_words for each metaspace
2595    if (msp != NULL) {
2596      used += msp->used_words_slow(mdtype);
2597    }
2598  }
2599  return used * BytesPerWord;
2600}
2601
2602size_t MetaspaceAux::free_bytes_slow(Metaspace::MetadataType mdtype) {
2603  size_t free = 0;
2604  ClassLoaderDataGraphMetaspaceIterator iter;
2605  while (iter.repeat()) {
2606    Metaspace* msp = iter.get_next();
2607    if (msp != NULL) {
2608      free += msp->free_words_slow(mdtype);
2609    }
2610  }
2611  return free * BytesPerWord;
2612}
2613
2614size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
2615  if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) {
2616    return 0;
2617  }
2618  // Don't count the space in the freelists.  That space will be
2619  // added to the capacity calculation as needed.
2620  size_t capacity = 0;
2621  ClassLoaderDataGraphMetaspaceIterator iter;
2622  while (iter.repeat()) {
2623    Metaspace* msp = iter.get_next();
2624    if (msp != NULL) {
2625      capacity += msp->capacity_words_slow(mdtype);
2626    }
2627  }
2628  return capacity * BytesPerWord;
2629}
2630
2631size_t MetaspaceAux::capacity_bytes_slow() {
2632#ifdef PRODUCT
2633  // Use capacity_bytes() in PRODUCT instead of this function.
2634  guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
2635#endif
2636  size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
2637  size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
2638  assert(capacity_bytes() == class_capacity + non_class_capacity,
2639      err_msg("bad accounting: capacity_bytes() " SIZE_FORMAT
2640        " class_capacity + non_class_capacity " SIZE_FORMAT
2641        " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
2642        capacity_bytes(), class_capacity + non_class_capacity,
2643        class_capacity, non_class_capacity));
2644
2645  return class_capacity + non_class_capacity;
2646}
2647
2648size_t MetaspaceAux::reserved_bytes(Metaspace::MetadataType mdtype) {
2649  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2650  return list == NULL ? 0 : list->reserved_bytes();
2651}
2652
2653size_t MetaspaceAux::committed_bytes(Metaspace::MetadataType mdtype) {
2654  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2655  return list == NULL ? 0 : list->committed_bytes();
2656}
2657
2658size_t MetaspaceAux::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
2659
2660size_t MetaspaceAux::free_chunks_total_words(Metaspace::MetadataType mdtype) {
2661  ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
2662  if (chunk_manager == NULL) {
2663    return 0;
2664  }
2665  chunk_manager->slow_verify();
2666  return chunk_manager->free_chunks_total_words();
2667}
2668
2669size_t MetaspaceAux::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
2670  return free_chunks_total_words(mdtype) * BytesPerWord;
2671}
2672
2673size_t MetaspaceAux::free_chunks_total_words() {
2674  return free_chunks_total_words(Metaspace::ClassType) +
2675         free_chunks_total_words(Metaspace::NonClassType);
2676}
2677
2678size_t MetaspaceAux::free_chunks_total_bytes() {
2679  return free_chunks_total_words() * BytesPerWord;
2680}
2681
2682bool MetaspaceAux::has_chunk_free_list(Metaspace::MetadataType mdtype) {
2683  return Metaspace::get_chunk_manager(mdtype) != NULL;
2684}
2685
2686MetaspaceChunkFreeListSummary MetaspaceAux::chunk_free_list_summary(Metaspace::MetadataType mdtype) {
2687  if (!has_chunk_free_list(mdtype)) {
2688    return MetaspaceChunkFreeListSummary();
2689  }
2690
2691  const ChunkManager* cm = Metaspace::get_chunk_manager(mdtype);
2692  return cm->chunk_free_list_summary();
2693}
2694
2695void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
2696  gclog_or_tty->print(", [Metaspace:");
2697  if (PrintGCDetails && Verbose) {
2698    gclog_or_tty->print(" "  SIZE_FORMAT
2699                        "->" SIZE_FORMAT
2700                        "("  SIZE_FORMAT ")",
2701                        prev_metadata_used,
2702                        used_bytes(),
2703                        reserved_bytes());
2704  } else {
2705    gclog_or_tty->print(" "  SIZE_FORMAT "K"
2706                        "->" SIZE_FORMAT "K"
2707                        "("  SIZE_FORMAT "K)",
2708                        prev_metadata_used/K,
2709                        used_bytes()/K,
2710                        reserved_bytes()/K);
2711  }
2712
2713  gclog_or_tty->print("]");
2714}
2715
2716// This is printed when PrintGCDetails is enabled
2717void MetaspaceAux::print_on(outputStream* out) {
2718  Metaspace::MetadataType nct = Metaspace::NonClassType;
2719
2720  out->print_cr(" Metaspace       "
2721                "used "      SIZE_FORMAT "K, "
2722                "capacity "  SIZE_FORMAT "K, "
2723                "committed " SIZE_FORMAT "K, "
2724                "reserved "  SIZE_FORMAT "K",
2725                used_bytes()/K,
2726                capacity_bytes()/K,
2727                committed_bytes()/K,
2728                reserved_bytes()/K);
2729
2730  if (Metaspace::using_class_space()) {
2731    Metaspace::MetadataType ct = Metaspace::ClassType;
2732    out->print_cr("  class space    "
2733                  "used "      SIZE_FORMAT "K, "
2734                  "capacity "  SIZE_FORMAT "K, "
2735                  "committed " SIZE_FORMAT "K, "
2736                  "reserved "  SIZE_FORMAT "K",
2737                  used_bytes(ct)/K,
2738                  capacity_bytes(ct)/K,
2739                  committed_bytes(ct)/K,
2740                  reserved_bytes(ct)/K);
2741  }
2742}
2743
2744// Print information for class space and data space separately.
2745// This is almost the same as above.
2746void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
2747  size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype);
2748  size_t capacity_bytes = capacity_bytes_slow(mdtype);
2749  size_t used_bytes = used_bytes_slow(mdtype);
2750  size_t free_bytes = free_bytes_slow(mdtype);
2751  size_t used_and_free = used_bytes + free_bytes +
2752                           free_chunks_capacity_bytes;
2753  out->print_cr("  Chunk accounting: used in chunks " SIZE_FORMAT
2754             "K + unused in chunks " SIZE_FORMAT "K  + "
2755             " capacity in free chunks " SIZE_FORMAT "K = " SIZE_FORMAT
2756             "K  capacity in allocated chunks " SIZE_FORMAT "K",
2757             used_bytes / K,
2758             free_bytes / K,
2759             free_chunks_capacity_bytes / K,
2760             used_and_free / K,
2761             capacity_bytes / K);
2762  // Accounting can only be correct if we got the values during a safepoint
2763  assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong");
2764}
2765
2766// Print total fragmentation for class metaspaces
2767void MetaspaceAux::print_class_waste(outputStream* out) {
2768  assert(Metaspace::using_class_space(), "class metaspace not used");
2769  size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0;
2770  size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0;
2771  ClassLoaderDataGraphMetaspaceIterator iter;
2772  while (iter.repeat()) {
2773    Metaspace* msp = iter.get_next();
2774    if (msp != NULL) {
2775      cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
2776      cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
2777      cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
2778      cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
2779      cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
2780      cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
2781      cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
2782    }
2783  }
2784  out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
2785                SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
2786                SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
2787                "large count " SIZE_FORMAT,
2788                cls_specialized_count, cls_specialized_waste,
2789                cls_small_count, cls_small_waste,
2790                cls_medium_count, cls_medium_waste, cls_humongous_count);
2791}
2792
2793// Print total fragmentation for data and class metaspaces separately
2794void MetaspaceAux::print_waste(outputStream* out) {
2795  size_t specialized_waste = 0, small_waste = 0, medium_waste = 0;
2796  size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0;
2797
2798  ClassLoaderDataGraphMetaspaceIterator iter;
2799  while (iter.repeat()) {
2800    Metaspace* msp = iter.get_next();
2801    if (msp != NULL) {
2802      specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
2803      specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
2804      small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex);
2805      small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex);
2806      medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex);
2807      medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex);
2808      humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex);
2809    }
2810  }
2811  out->print_cr("Total fragmentation waste (words) doesn't count free space");
2812  out->print_cr("  data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
2813                        SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
2814                        SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
2815                        "large count " SIZE_FORMAT,
2816             specialized_count, specialized_waste, small_count,
2817             small_waste, medium_count, medium_waste, humongous_count);
2818  if (Metaspace::using_class_space()) {
2819    print_class_waste(out);
2820  }
2821}
2822
2823// Dump global metaspace things from the end of ClassLoaderDataGraph
2824void MetaspaceAux::dump(outputStream* out) {
2825  out->print_cr("All Metaspace:");
2826  out->print("data space: "); print_on(out, Metaspace::NonClassType);
2827  out->print("class space: "); print_on(out, Metaspace::ClassType);
2828  print_waste(out);
2829}
2830
2831void MetaspaceAux::verify_free_chunks() {
2832  Metaspace::chunk_manager_metadata()->verify();
2833  if (Metaspace::using_class_space()) {
2834    Metaspace::chunk_manager_class()->verify();
2835  }
2836}
2837
2838void MetaspaceAux::verify_capacity() {
2839#ifdef ASSERT
2840  size_t running_sum_capacity_bytes = capacity_bytes();
2841  // For purposes of the running sum of capacity, verify against capacity
2842  size_t capacity_in_use_bytes = capacity_bytes_slow();
2843  assert(running_sum_capacity_bytes == capacity_in_use_bytes,
2844    err_msg("capacity_words() * BytesPerWord " SIZE_FORMAT
2845            " capacity_bytes_slow() " SIZE_FORMAT,
2846            running_sum_capacity_bytes, capacity_in_use_bytes));
2847  for (Metaspace::MetadataType i = Metaspace::ClassType;
2848       i < Metaspace::MetadataTypeCount;
2849       i = (Metaspace::MetadataType)(i + 1)) {
2850    size_t capacity_in_use_bytes = capacity_bytes_slow(i);
2851    assert(capacity_bytes(i) == capacity_in_use_bytes,
2852      err_msg("capacity_bytes(%u) " SIZE_FORMAT
2853              " capacity_bytes_slow(%u) " SIZE_FORMAT,
2854              i, capacity_bytes(i), i, capacity_in_use_bytes));
2855  }
2856#endif
2857}
2858
2859void MetaspaceAux::verify_used() {
2860#ifdef ASSERT
2861  size_t running_sum_used_bytes = used_bytes();
2862  // For purposes of the running sum of used, verify against used
2863  size_t used_in_use_bytes = used_bytes_slow();
2864  assert(used_bytes() == used_in_use_bytes,
2865    err_msg("used_bytes() " SIZE_FORMAT
2866            " used_bytes_slow() " SIZE_FORMAT,
2867            used_bytes(), used_in_use_bytes));
2868  for (Metaspace::MetadataType i = Metaspace::ClassType;
2869       i < Metaspace::MetadataTypeCount;
2870       i = (Metaspace::MetadataType)(i + 1)) {
2871    size_t used_in_use_bytes = used_bytes_slow(i);
2872    assert(used_bytes(i) == used_in_use_bytes,
2873      err_msg("used_bytes(%u) " SIZE_FORMAT
2874              " used_bytes_slow(%u) " SIZE_FORMAT,
2875              i, used_bytes(i), i, used_in_use_bytes));
2876  }
2877#endif
2878}
2879
2880void MetaspaceAux::verify_metrics() {
2881  verify_capacity();
2882  verify_used();
2883}
2884
2885
2886// Metaspace methods
2887
2888size_t Metaspace::_first_chunk_word_size = 0;
2889size_t Metaspace::_first_class_chunk_word_size = 0;
2890
2891size_t Metaspace::_commit_alignment = 0;
2892size_t Metaspace::_reserve_alignment = 0;
2893
2894Metaspace::Metaspace(Mutex* lock, MetaspaceType type) {
2895  initialize(lock, type);
2896}
2897
2898Metaspace::~Metaspace() {
2899  delete _vsm;
2900  if (using_class_space()) {
2901    delete _class_vsm;
2902  }
2903}
2904
2905VirtualSpaceList* Metaspace::_space_list = NULL;
2906VirtualSpaceList* Metaspace::_class_space_list = NULL;
2907
2908ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
2909ChunkManager* Metaspace::_chunk_manager_class = NULL;
2910
2911#define VIRTUALSPACEMULTIPLIER 2
2912
2913#ifdef _LP64
2914static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
2915
2916void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
2917  // Figure out the narrow_klass_base and the narrow_klass_shift.  The
2918  // narrow_klass_base is the lower of the metaspace base and the cds base
2919  // (if cds is enabled).  The narrow_klass_shift depends on the distance
2920  // between the lower base and higher address.
2921  address lower_base;
2922  address higher_address;
2923  if (UseSharedSpaces) {
2924    higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
2925                          (address)(metaspace_base + compressed_class_space_size()));
2926    lower_base = MIN2(metaspace_base, cds_base);
2927  } else {
2928    higher_address = metaspace_base + compressed_class_space_size();
2929    lower_base = metaspace_base;
2930
2931    uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;
2932    // If compressed class space fits in lower 32G, we don't need a base.
2933    if (higher_address <= (address)klass_encoding_max) {
2934      lower_base = 0; // Effectively lower base is zero.
2935    }
2936  }
2937
2938  Universe::set_narrow_klass_base(lower_base);
2939
2940  if ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) {
2941    Universe::set_narrow_klass_shift(0);
2942  } else {
2943    assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces");
2944    Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
2945  }
2946}
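// Worked example (hypothetical addresses): a 1G class space reserved at 20G
// lies below the 32G klass_encoding_max, so lower_base becomes 0; its
// higher_address (21G) exceeds UnscaledClassSpaceMax (4G), so the shift is
// LogKlassAlignmentInBytes and a Klass* is stored as addr >> shift.  The
// same space reserved at 40G keeps lower_base = 40G; the 1G span fits in
// 4G, so the shift is 0 and the encoding is addr - lower_base.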
2947
2948// Return TRUE if the specified metaspace_base and cds_base are close enough
2949// to work with compressed klass pointers.
2950bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
2951  assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
2952  assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
2953  address lower_base = MIN2((address)metaspace_base, cds_base);
2954  address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
2955                                (address)(metaspace_base + compressed_class_space_size()));
2956  return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
2957}
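
// Worked example (illustrative addresses only): with cds_base at
// 0x800000000 and metaspace_base just above the archive, the check passes
// as long as the span from the lower base to the end of the higher region
// stays within UnscaledClassSpaceMax (4G), since CDS requires a klass
// shift of zero.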
2958
2959// Try to allocate the metaspace at the requested addr.
2960void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
2961  assert(using_class_space(), "called improperly");
2962  assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
2963  assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
2964         "Metaspace size is too big");
2965  assert_is_ptr_aligned(requested_addr, _reserve_alignment);
2966  assert_is_ptr_aligned(cds_base, _reserve_alignment);
2967  assert_is_size_aligned(compressed_class_space_size(), _reserve_alignment);
2968
2969  // Don't use large pages for the class space.
2970  bool large_pages = false;
2971
2972  ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
2973                                             _reserve_alignment,
2974                                             large_pages,
2975                                             requested_addr, 0);
2976  if (!metaspace_rs.is_reserved()) {
2977    if (UseSharedSpaces) {
2978      size_t increment = align_size_up(1*G, _reserve_alignment);
2979
2980      // Keep trying to allocate the metaspace, increasing the requested_addr
2981      // by 1GB each time, until we reach an address that will no longer allow
2982      // use of CDS with compressed klass pointers.
2983      char *addr = requested_addr;
2984      while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
2985             can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
2986        addr = addr + increment;
2987        metaspace_rs = ReservedSpace(compressed_class_space_size(),
2988                                     _reserve_alignment, large_pages, addr, 0);
2989      }
2990    }
2991
    // If no successful allocation then try to allocate the space anywhere.  If
    // that also fails, it is a fatal out-of-memory error.  At this point we
    // cannot fall back to allocating the metaspace as if
    // UseCompressedClassPointers were off, because too much initialization
    // that depends on UseCompressedClassPointers has already happened.
2997    if (!metaspace_rs.is_reserved()) {
2998      metaspace_rs = ReservedSpace(compressed_class_space_size(),
2999                                   _reserve_alignment, large_pages);
3000      if (!metaspace_rs.is_reserved()) {
        vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes",
                                              compressed_class_space_size()));
3003      }
3004    }
3005  }
3006
3007  // If we got here then the metaspace got allocated.
3008  MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
3009
3010  // Verify that we can use shared spaces.  Otherwise, turn off CDS.
3011  if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
3012    FileMapInfo::stop_sharing_and_unmap(
3013        "Could not allocate metaspace at a compatible address");
3014  }
3015
3016  set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
3017                                  UseSharedSpaces ? (address)cds_base : 0);
3018
3019  initialize_class_space(metaspace_rs);
3020
3021  if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) {
    gclog_or_tty->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
3023                            Universe::narrow_klass_base(), Universe::narrow_klass_shift());
3024    gclog_or_tty->print_cr("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT " Req Addr: " PTR_FORMAT,
3025                           compressed_class_space_size(), metaspace_rs.base(), requested_addr);
3026  }
3027}
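
// For illustration, the retry loop above probes candidate addresses in 1G
// steps (rounded up to the reserve alignment):
//
//   requested_addr + 1G, requested_addr + 2G, ...
//
// and stops as soon as a reservation succeeds, the address arithmetic would
// wrap, or the next candidate could no longer encode both the CDS archive
// and the class space with a zero klass shift.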
3028
3029// For UseCompressedClassPointers the class space is reserved above the top of
3030// the Java heap.  The argument passed in is at the base of the compressed space.
3031void Metaspace::initialize_class_space(ReservedSpace rs) {
  // The reserved space size may be bigger because of alignment, especially with UseLargePages
3033  assert(rs.size() >= CompressedClassSpaceSize,
3034         err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), CompressedClassSpaceSize));
3035  assert(using_class_space(), "Must be using class space");
3036  _class_space_list = new VirtualSpaceList(rs);
3037  _chunk_manager_class = new ChunkManager(SpecializedChunk, ClassSmallChunk, ClassMediumChunk);
3038
3039  if (!_class_space_list->initialization_succeeded()) {
3040    vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
3041  }
3042}
3043
3044#endif
3045
3046void Metaspace::ergo_initialize() {
3047  if (DumpSharedSpaces) {
3048    // Using large pages when dumping the shared archive is currently not implemented.
3049    FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
3050  }
3051
3052  size_t page_size = os::vm_page_size();
3053  if (UseLargePages && UseLargePagesInMetaspace) {
3054    page_size = os::large_page_size();
3055  }
3056
3057  _commit_alignment  = page_size;
3058  _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
3059
  // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since doing so
  // would clobber the record of whether MaxMetaspaceSize was set on the
  // command line.  That information is needed later to conform to the
  // specification of the java.lang.management.MemoryUsage API.
3064  //
3065  // Ideally, we would be able to set the default value of MaxMetaspaceSize in
3066  // globals.hpp to the aligned value, but this is not possible, since the
3067  // alignment depends on other flags being parsed.
3068  MaxMetaspaceSize = align_size_down_bounded(MaxMetaspaceSize, _reserve_alignment);
3069
3070  if (MetaspaceSize > MaxMetaspaceSize) {
3071    MetaspaceSize = MaxMetaspaceSize;
3072  }
3073
3074  MetaspaceSize = align_size_down_bounded(MetaspaceSize, _commit_alignment);
3075
3076  assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");
3077
3078  if (MetaspaceSize < 256*K) {
3079    vm_exit_during_initialization("Too small initial Metaspace size");
3080  }
3081
3082  MinMetaspaceExpansion = align_size_down_bounded(MinMetaspaceExpansion, _commit_alignment);
3083  MaxMetaspaceExpansion = align_size_down_bounded(MaxMetaspaceExpansion, _commit_alignment);
3084
3085  CompressedClassSpaceSize = align_size_down_bounded(CompressedClassSpaceSize, _reserve_alignment);
3086  set_compressed_class_space_size(CompressedClassSpaceSize);
3087}
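
// Worked example (illustrative platform values): with 4K pages, a 64K
// allocation granularity, and large pages not used for metaspace:
//
//   _commit_alignment  = 4K
//   _reserve_alignment = MAX2(4K, 64K) = 64K
//
// so MaxMetaspaceSize and CompressedClassSpaceSize are rounded down to 64K
// multiples, while MetaspaceSize and the expansion steps are rounded down
// to 4K multiples.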
3088
3089void Metaspace::global_initialize() {
3090  // Initialize the alignment for shared spaces.
3091  int max_alignment = os::vm_page_size();
3092  size_t cds_total = 0;
3093
3094  MetaspaceShared::set_max_alignment(max_alignment);
3095
3096  if (DumpSharedSpaces) {
3097    SharedReadOnlySize  = align_size_up(SharedReadOnlySize,  max_alignment);
3098    SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
3099    SharedMiscDataSize  = align_size_up(SharedMiscDataSize,  max_alignment);
3100    SharedMiscCodeSize  = align_size_up(SharedMiscCodeSize,  max_alignment);
3101
3102    // Initialize with the sum of the shared space sizes.  The read-only
    // and read-write metaspace chunks will be allocated out of this and the
3104    // remainder is the misc code and data chunks.
3105    cds_total = FileMapInfo::shared_spaces_size();
3106    cds_total = align_size_up(cds_total, _reserve_alignment);
3107    _space_list = new VirtualSpaceList(cds_total/wordSize);
3108    _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
3109
3110    if (!_space_list->initialization_succeeded()) {
3111      vm_exit_during_initialization("Unable to dump shared archive.", NULL);
3112    }
3113
3114#ifdef _LP64
3115    if (cds_total + compressed_class_space_size() > UnscaledClassSpaceMax) {
3116      vm_exit_during_initialization("Unable to dump shared archive.",
3117          err_msg("Size of archive (" SIZE_FORMAT ") + compressed class space ("
3118                  SIZE_FORMAT ") == total (" SIZE_FORMAT ") is larger than compressed "
3119                  "klass limit: " SIZE_FORMAT, cds_total, compressed_class_space_size(),
3120                  cds_total + compressed_class_space_size(), UnscaledClassSpaceMax));
3121    }
3122
3123    // Set the compressed klass pointer base so that decoding of these pointers works
3124    // properly when creating the shared archive.
3125    assert(UseCompressedOops && UseCompressedClassPointers,
3126      "UseCompressedOops and UseCompressedClassPointers must be set");
3127    Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
3128    if (TraceMetavirtualspaceAllocation && Verbose) {
      gclog_or_tty->print_cr("Setting narrow klass base to address: " PTR_FORMAT,
3130                             _space_list->current_virtual_space()->bottom());
3131    }
3132
3133    Universe::set_narrow_klass_shift(0);
3134#endif
3135
3136  } else {
3137    // If using shared space, open the file that contains the shared space
3138    // and map in the memory before initializing the rest of metaspace (so
    // the addresses don't conflict).
3140    address cds_address = NULL;
3141    if (UseSharedSpaces) {
3142      FileMapInfo* mapinfo = new FileMapInfo();
3143      memset(mapinfo, 0, sizeof(FileMapInfo));
3144
      // Open the shared archive file, then read and validate the header.  If
      // initialization fails, shared spaces (UseSharedSpaces) are disabled
      // and the file is closed.  The shared spaces are also mapped in here.
3149      if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
3150        FileMapInfo::set_current_info(mapinfo);
3151        cds_total = FileMapInfo::shared_spaces_size();
3152        cds_address = (address)mapinfo->region_base(0);
3153      } else {
3154        assert(!mapinfo->is_open() && !UseSharedSpaces,
3155               "archive file not closed or shared spaces not disabled.");
3156      }
3157    }
3158
3159#ifdef _LP64
3160    // If UseCompressedClassPointers is set then allocate the metaspace area
3161    // above the heap and above the CDS area (if it exists).
3162    if (using_class_space()) {
3163      if (UseSharedSpaces) {
3164        char* cds_end = (char*)(cds_address + cds_total);
3165        cds_end = (char *)align_ptr_up(cds_end, _reserve_alignment);
3166        allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
3167      } else {
3168        char* base = (char*)align_ptr_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
3169        allocate_metaspace_compressed_klass_ptrs(base, 0);
3170      }
3171    }
3172#endif
3173
3174    // Initialize these before initializing the VirtualSpaceList
3175    _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
3176    _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
3177    // Make the first class chunk bigger than a medium chunk so it's not put
    // on the medium chunk list.  The next chunk will be small and progress
    // from there.  This size was determined by running with -version.
3180    _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
3181                                       (CompressedClassSpaceSize/BytesPerWord)*2);
3182    _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
3183    // Arbitrarily set the initial virtual space to a multiple
3184    // of the boot class loader size.
3185    size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
3186    word_size = align_size_up(word_size, Metaspace::reserve_alignment_words());
3187
3188    // Initialize the list of virtual spaces.
3189    _space_list = new VirtualSpaceList(word_size);
3190    _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
3191
3192    if (!_space_list->initialization_succeeded()) {
3193      vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
3194    }
3195  }
3196
3197  MetaspaceGC::initialize();
3198  _tracer = new MetaspaceTracer();
3199}
3200
3201Metachunk* Metaspace::get_initialization_chunk(MetadataType mdtype,
3202                                               size_t chunk_word_size,
3203                                               size_t chunk_bunch) {
3204  // Get a chunk from the chunk freelist
3205  Metachunk* chunk = get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
3206  if (chunk != NULL) {
3207    return chunk;
3208  }
3209
3210  return get_space_list(mdtype)->get_new_chunk(chunk_word_size, chunk_word_size, chunk_bunch);
3211}
3212
3213void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
3214
3215  assert(space_list() != NULL,
3216    "Metadata VirtualSpaceList has not been initialized");
3217  assert(chunk_manager_metadata() != NULL,
3218    "Metadata ChunkManager has not been initialized");
3219
3220  _vsm = new SpaceManager(NonClassType, lock);
3221  if (_vsm == NULL) {
3222    return;
3223  }
3224  size_t word_size;
3225  size_t class_word_size;
3226  vsm()->get_initial_chunk_sizes(type, &word_size, &class_word_size);
3227
3228  if (using_class_space()) {
    assert(class_space_list() != NULL,
      "Class VirtualSpaceList has not been initialized");
    assert(chunk_manager_class() != NULL,
      "Class ChunkManager has not been initialized");
3233
3234    // Allocate SpaceManager for classes.
3235    _class_vsm = new SpaceManager(ClassType, lock);
3236    if (_class_vsm == NULL) {
3237      return;
3238    }
3239  }
3240
3241  MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3242
3243  // Allocate chunk for metadata objects
3244  Metachunk* new_chunk = get_initialization_chunk(NonClassType,
3245                                                  word_size,
3246                                                  vsm()->medium_chunk_bunch());
3247  assert(!DumpSharedSpaces || new_chunk != NULL, "should have enough space for both chunks");
3248  if (new_chunk != NULL) {
3249    // Add to this manager's list of chunks in use and current_chunk().
3250    vsm()->add_chunk(new_chunk, true);
3251  }
3252
3253  // Allocate chunk for class metadata objects
3254  if (using_class_space()) {
3255    Metachunk* class_chunk = get_initialization_chunk(ClassType,
3256                                                      class_word_size,
3257                                                      class_vsm()->medium_chunk_bunch());
3258    if (class_chunk != NULL) {
3259      class_vsm()->add_chunk(class_chunk, true);
3260    }
3261  }
3262
3263  _alloc_record_head = NULL;
3264  _alloc_record_tail = NULL;
3265}
3266
3267size_t Metaspace::align_word_size_up(size_t word_size) {
3268  size_t byte_size = word_size * wordSize;
3269  return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
3270}
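
// Example (assuming a 64-bit VM, wordSize == 8, and a 4K allocation
// alignment; both are platform-dependent): align_word_size_up(1000)
// converts to 8000 bytes, rounds up to 8192, and returns 1024 words.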
3271
3272MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
3273  // DumpSharedSpaces doesn't use class metadata area (yet)
3274  // Also, don't use class_vsm() unless UseCompressedClassPointers is true.
3275  if (is_class_space_allocation(mdtype)) {
    return class_vsm()->allocate(word_size);
  } else {
    return vsm()->allocate(word_size);
3279  }
3280}
3281
3282MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
3283  size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
3284  assert(delta_bytes > 0, "Must be");
3285
3286  size_t after_inc = MetaspaceGC::inc_capacity_until_GC(delta_bytes);
3287
  // capacity_until_GC may be updated concurrently, so the previous value is
  // reconstructed from the value returned by the atomic increment.
3289  size_t before_inc = after_inc - delta_bytes;
3290
3291  tracer()->report_gc_threshold(before_inc, after_inc,
3292                                MetaspaceGCThresholdUpdater::ExpandAndAllocate);
3293  if (PrintGCDetails && Verbose) {
3294    gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT
3295        " to " SIZE_FORMAT, before_inc, after_inc);
3296  }
3297
3298  return allocate(word_size, mdtype);
3299}
3300
// Bottom of the space allocated in the Metaspace.  Only useful and valid
// while dumping shared spaces, when allocation proceeds linearly.
3303char* Metaspace::bottom() const {
3304  assert(DumpSharedSpaces, "only useful and valid for dumping shared spaces");
3305  return (char*)vsm()->current_chunk()->bottom();
3306}
3307
3308size_t Metaspace::used_words_slow(MetadataType mdtype) const {
3309  if (mdtype == ClassType) {
3310    return using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
3311  } else {
3312    return vsm()->sum_used_in_chunks_in_use();  // includes overhead!
3313  }
3314}
3315
3316size_t Metaspace::free_words_slow(MetadataType mdtype) const {
3317  if (mdtype == ClassType) {
3318    return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
3319  } else {
3320    return vsm()->sum_free_in_chunks_in_use();
3321  }
3322}
3323
// Space capacity in the Metaspace.  It includes space in the lists of
// chunks from which allocations have been made.  It does not include space
// in the global chunk freelists, nor the free space tracked in the block
// dictionaries, since that space is already counted in some chunk.
3329size_t Metaspace::capacity_words_slow(MetadataType mdtype) const {
3330  if (mdtype == ClassType) {
3331    return using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0;
3332  } else {
3333    return vsm()->sum_capacity_in_chunks_in_use();
3334  }
3335}
3336
3337size_t Metaspace::used_bytes_slow(MetadataType mdtype) const {
3338  return used_words_slow(mdtype) * BytesPerWord;
3339}
3340
3341size_t Metaspace::capacity_bytes_slow(MetadataType mdtype) const {
3342  return capacity_words_slow(mdtype) * BytesPerWord;
3343}
3344
3345void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
3346  assert(!SafepointSynchronize::is_at_safepoint()
3347         || Thread::current()->is_VM_thread(), "should be the VM thread");
3348
3349  MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
3350
3351  if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
    // Dark matter.  Too small for the block dictionary, so the space is
    // abandoned.  In debug builds, poison it to catch stale uses.
#ifdef ASSERT
    Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
#endif
3356    return;
3357  }
3358  if (is_class && using_class_space()) {
3359    class_vsm()->deallocate(ptr, word_size);
3360  } else {
3361    vsm()->deallocate(ptr, word_size);
3362  }
3363}
3364
3365
3366MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
3367                              bool read_only, MetaspaceObj::Type type, TRAPS) {
3368  if (HAS_PENDING_EXCEPTION) {
3369    assert(false, "Should not allocate with exception pending");
3370    return NULL;  // caller does a CHECK_NULL too
3371  }
3372
3373  assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
3374        "ClassLoaderData::the_null_class_loader_data() should have been used.");
3375
3376  // Allocate in metaspaces without taking out a lock, because it deadlocks
3377  // with the SymbolTable_lock.  Dumping is single threaded for now.  We'll have
3378  // to revisit this for application class data sharing.
3379  if (DumpSharedSpaces) {
3380    assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity");
3381    Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace();
3382    MetaWord* result = space->allocate(word_size, NonClassType);
3383    if (result == NULL) {
3384      report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
3385    }
3386
3387    space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));
3388
3389    // Zero initialize.
3390    Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0);
3391
3392    return result;
3393  }
3394
3395  MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
3396
3397  // Try to allocate metadata.
3398  MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
3399
3400  if (result == NULL) {
3401    tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype);
3402
3403    // Allocation failed.
3404    if (is_init_completed()) {
3405      // Only start a GC if the bootstrapping has completed.
3406
3407      // Try to clean out some memory and retry.
3408      result = Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
3409          loader_data, word_size, mdtype);
3410    }
3411  }
3412
3413  if (result == NULL) {
3414    report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL);
3415  }
3416
3417  // Zero initialize.
3418  Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0);
3419
3420  return result;
3421}
3422
3423size_t Metaspace::class_chunk_size(size_t word_size) {
3424  assert(using_class_space(), "Has to use class space");
3425  return class_vsm()->calc_chunk_size(word_size);
3426}
3427
3428void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
3429  tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);
3430
  // Allocation returned NULL even after a GC, so we are out of memory.
3432  if (Verbose && TraceMetadataChunkAllocation) {
3433    gclog_or_tty->print_cr("Metaspace allocation failed for size "
3434        SIZE_FORMAT, word_size);
3435    if (loader_data->metaspace_or_null() != NULL) {
3436      loader_data->dump(gclog_or_tty);
3437    }
3438    MetaspaceAux::dump(gclog_or_tty);
3439  }
3440
3441  bool out_of_compressed_class_space = false;
3442  if (is_class_space_allocation(mdtype)) {
3443    Metaspace* metaspace = loader_data->metaspace_non_null();
3444    out_of_compressed_class_space =
3445      MetaspaceAux::committed_bytes(Metaspace::ClassType) +
3446      (metaspace->class_chunk_size(word_size) * BytesPerWord) >
3447      CompressedClassSpaceSize;
3448  }
3449
3450  // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
3451  const char* space_string = out_of_compressed_class_space ?
3452    "Compressed class space" : "Metaspace";
3453
3454  report_java_out_of_memory(space_string);
3455
3456  if (JvmtiExport::should_post_resource_exhausted()) {
3457    JvmtiExport::post_resource_exhausted(
3458        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
3459        space_string);
3460  }
3461
3462  if (!is_init_completed()) {
3463    vm_exit_during_initialization("OutOfMemoryError", space_string);
3464  }
3465
3466  if (out_of_compressed_class_space) {
3467    THROW_OOP(Universe::out_of_memory_error_class_metaspace());
3468  } else {
3469    THROW_OOP(Universe::out_of_memory_error_metaspace());
3470  }
3471}
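
// For illustration: if CompressedClassSpaceSize is 1G, 1020M of class space
// is already committed, and the failed request would need an 8M chunk, the
// sum exceeds the cap and the error is reported against "Compressed class
// space" rather than "Metaspace".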
3472
3473const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) {
3474  switch (mdtype) {
3475    case Metaspace::ClassType: return "Class";
3476    case Metaspace::NonClassType: return "Metadata";
3477    default:
3478      assert(false, err_msg("Got bad mdtype: %d", (int) mdtype));
3479      return NULL;
3480  }
3481}
3482
3483void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
3484  assert(DumpSharedSpaces, "sanity");
3485
3486  AllocRecord *rec = new AllocRecord((address)ptr, type, (int)word_size * HeapWordSize);
3487  if (_alloc_record_head == NULL) {
3488    _alloc_record_head = _alloc_record_tail = rec;
3489  } else {
3490    _alloc_record_tail->_next = rec;
3491    _alloc_record_tail = rec;
3492  }
3493}
3494
3495void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) {
3496  assert(DumpSharedSpaces, "unimplemented for !DumpSharedSpaces");
3497
3498  address last_addr = (address)bottom();
3499
3500  for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
3501    address ptr = rec->_ptr;
3502    if (last_addr < ptr) {
3503      closure->doit(last_addr, MetaspaceObj::UnknownType, ptr - last_addr);
3504    }
3505    closure->doit(ptr, rec->_type, rec->_byte_size);
3506    last_addr = ptr + rec->_byte_size;
3507  }
3508
3509  address top = ((address)bottom()) + used_bytes_slow(Metaspace::NonClassType);
3510  if (last_addr < top) {
3511    closure->doit(last_addr, MetaspaceObj::UnknownType, top - last_addr);
3512  }
3513}
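
// A minimal closure sketch (hypothetical, for illustration only):
//
//   class PrintingClosure : public Metaspace::AllocRecordClosure {
//    public:
//     void doit(address ptr, MetaspaceObj::Type type, int byte_size) {
//       tty->print_cr(PTR_FORMAT " type=%d size=%d", ptr, (int)type, byte_size);
//     }
//   };
//
// Note that gaps between recorded allocations are reported to the closure
// as MetaspaceObj::UnknownType spans.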
3514
3515void Metaspace::purge(MetadataType mdtype) {
3516  get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
3517}
3518
3519void Metaspace::purge() {
3520  MutexLockerEx cl(SpaceManager::expand_lock(),
3521                   Mutex::_no_safepoint_check_flag);
3522  purge(NonClassType);
3523  if (using_class_space()) {
3524    purge(ClassType);
3525  }
3526}
3527
3528void Metaspace::print_on(outputStream* out) const {
3529  // Print both class virtual space counts and metaspace.
3530  if (Verbose) {
3531    vsm()->print_on(out);
3532    if (using_class_space()) {
3533      class_vsm()->print_on(out);
3534    }
3535  }
3536}
3537
3538bool Metaspace::contains(const void* ptr) {
3539  if (vsm()->contains(ptr)) return true;
3540  if (using_class_space()) {
3541    return class_vsm()->contains(ptr);
3542  }
3543  return false;
3544}
3545
3546void Metaspace::verify() {
3547  vsm()->verify();
3548  if (using_class_space()) {
3549    class_vsm()->verify();
3550  }
3551}
3552
3553void Metaspace::dump(outputStream* const out) const {
3554  out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, vsm());
3555  vsm()->dump(out);
3556  if (using_class_space()) {
3557    out->print_cr("\nClass space manager: " INTPTR_FORMAT, class_vsm());
3558    class_vsm()->dump(out);
3559  }
3560}
3561
3562/////////////// Unit tests ///////////////
3563
3564#ifndef PRODUCT
3565
3566class TestMetaspaceAuxTest : AllStatic {
3567 public:
3568  static void test_reserved() {
3569    size_t reserved = MetaspaceAux::reserved_bytes();
3570
3571    assert(reserved > 0, "assert");
3572
3573    size_t committed  = MetaspaceAux::committed_bytes();
3574    assert(committed <= reserved, "assert");
3575
3576    size_t reserved_metadata = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
3577    assert(reserved_metadata > 0, "assert");
3578    assert(reserved_metadata <= reserved, "assert");
3579
3580    if (UseCompressedClassPointers) {
3581      size_t reserved_class    = MetaspaceAux::reserved_bytes(Metaspace::ClassType);
3582      assert(reserved_class > 0, "assert");
3583      assert(reserved_class < reserved, "assert");
3584    }
3585  }
3586
3587  static void test_committed() {
3588    size_t committed = MetaspaceAux::committed_bytes();
3589
3590    assert(committed > 0, "assert");
3591
3592    size_t reserved  = MetaspaceAux::reserved_bytes();
3593    assert(committed <= reserved, "assert");
3594
3595    size_t committed_metadata = MetaspaceAux::committed_bytes(Metaspace::NonClassType);
3596    assert(committed_metadata > 0, "assert");
3597    assert(committed_metadata <= committed, "assert");
3598
3599    if (UseCompressedClassPointers) {
3600      size_t committed_class    = MetaspaceAux::committed_bytes(Metaspace::ClassType);
3601      assert(committed_class > 0, "assert");
3602      assert(committed_class < committed, "assert");
3603    }
3604  }
3605
3606  static void test_virtual_space_list_large_chunk() {
3607    VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
3608    MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
    // Use a size larger than VirtualSpaceSize (256k), padded so that it is
    // _not_ vm_allocation_granularity aligned on Windows.
3611    size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
3612    large_size += (os::vm_page_size()/BytesPerWord);
3613    vs_list->get_new_chunk(large_size, large_size, 0);
3614  }
3615
3616  static void test() {
3617    test_reserved();
3618    test_committed();
3619    test_virtual_space_list_large_chunk();
3620  }
3621};
3622
3623void TestMetaspaceAux_test() {
3624  TestMetaspaceAuxTest::test();
3625}
3626
3627class TestVirtualSpaceNodeTest {
3628  static void chunk_up(size_t words_left, size_t& num_medium_chunks,
3629                                          size_t& num_small_chunks,
3630                                          size_t& num_specialized_chunks) {
3631    num_medium_chunks = words_left / MediumChunk;
3632    words_left = words_left % MediumChunk;
3633
3634    num_small_chunks = words_left / SmallChunk;
3635    words_left = words_left % SmallChunk;
3636    // how many specialized chunks can we get?
3637    num_specialized_chunks = words_left / SpecializedChunk;
3638    assert(words_left % SpecializedChunk == 0, "should be nothing left");
3639  }
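
  // Worked example with the non-class chunk sizes (MediumChunk == 8K words,
  // SmallChunk == 512, SpecializedChunk == 128): for
  // words_left == 2 * MediumChunk - (MediumChunk + SpecializedChunk) == 8064,
  // chunk_up() yields 0 medium, 15 small (7680 words) and 3 specialized
  // chunks (384 words), accounting for all 8064 words.  This matches the
  // humongous-chunk case exercised in test() below.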
3640
3641 public:
3642  static void test() {
3643    MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
    const size_t vsn_test_size_words = MediumChunk * 4;
3645    const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord;
3646
    // The chunk sizes must be multiples of each other, or this will fail
3648    STATIC_ASSERT(MediumChunk % SmallChunk == 0);
3649    STATIC_ASSERT(SmallChunk % SpecializedChunk == 0);
3650
3651    { // No committed memory in VSN
3652      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3653      VirtualSpaceNode vsn(vsn_test_size_bytes);
3654      vsn.initialize();
3655      vsn.retire(&cm);
3656      assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN");
3657    }
3658
3659    { // All of VSN is committed, half is used by chunks
3660      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3661      VirtualSpaceNode vsn(vsn_test_size_bytes);
3662      vsn.initialize();
3663      vsn.expand_by(vsn_test_size_words, vsn_test_size_words);
3664      vsn.get_chunk_vs(MediumChunk);
3665      vsn.get_chunk_vs(MediumChunk);
3666      vsn.retire(&cm);
3667      assert(cm.sum_free_chunks_count() == 2, "should have been memory left for 2 medium chunks");
3668      assert(cm.sum_free_chunks() == 2*MediumChunk, "sizes should add up");
3669    }
3670
3671    { // 4 pages of VSN is committed, some is used by chunks
3672      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3673      VirtualSpaceNode vsn(vsn_test_size_bytes);
3674      const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord;
3675      assert(page_chunks < MediumChunk, "Test expects medium chunks to be at least 4*page_size");
3676      vsn.initialize();
3677      vsn.expand_by(page_chunks, page_chunks);
3678      vsn.get_chunk_vs(SmallChunk);
3679      vsn.get_chunk_vs(SpecializedChunk);
3680      vsn.retire(&cm);
3681
3682      // committed - used = words left to retire
3683      const size_t words_left = page_chunks - SmallChunk - SpecializedChunk;
3684
3685      size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
3686      chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
3687
3688      assert(num_medium_chunks == 0, "should not get any medium chunks");
      assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should match the computed number of chunks");
3690      assert(cm.sum_free_chunks() == words_left, "sizes should add up");
3691    }
3692
3693    { // Half of VSN is committed, a humongous chunk is used
3694      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3695      VirtualSpaceNode vsn(vsn_test_size_bytes);
3696      vsn.initialize();
3697      vsn.expand_by(MediumChunk * 2, MediumChunk * 2);
3698      vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk
3699      vsn.retire(&cm);
3700
3701      const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk);
3702      size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
3703      chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
3704
3705      assert(num_medium_chunks == 0, "should not get any medium chunks");
      assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should match the computed number of chunks");
3707      assert(cm.sum_free_chunks() == words_left, "sizes should add up");
3708    }
3709
3710  }
3711
3712#define assert_is_available_positive(word_size) \
3713  assert(vsn.is_available(word_size), \
3714    err_msg(#word_size ": " PTR_FORMAT " bytes were not available in " \
3715            "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
3716            (uintptr_t)(word_size * BytesPerWord), vsn.bottom(), vsn.end()));
3717
3718#define assert_is_available_negative(word_size) \
3719  assert(!vsn.is_available(word_size), \
3720    err_msg(#word_size ": " PTR_FORMAT " bytes should not be available in " \
3721            "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
3722            (uintptr_t)(word_size * BytesPerWord), vsn.bottom(), vsn.end()));
3723
3724  static void test_is_available_positive() {
3725    // Reserve some memory.
3726    VirtualSpaceNode vsn(os::vm_allocation_granularity());
3727    assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
3728
3729    // Commit some memory.
3730    size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
3731    bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
3732    assert(expanded, "Failed to commit");
3733
3734    // Check that is_available accepts the committed size.
3735    assert_is_available_positive(commit_word_size);
3736
3737    // Check that is_available accepts half the committed size.
3738    size_t expand_word_size = commit_word_size / 2;
3739    assert_is_available_positive(expand_word_size);
3740  }
3741
3742  static void test_is_available_negative() {
3743    // Reserve some memory.
3744    VirtualSpaceNode vsn(os::vm_allocation_granularity());
3745    assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
3746
3747    // Commit some memory.
3748    size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
3749    bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
3750    assert(expanded, "Failed to commit");
3751
3752    // Check that is_available doesn't accept a too large size.
3753    size_t two_times_commit_word_size = commit_word_size * 2;
3754    assert_is_available_negative(two_times_commit_word_size);
3755  }
3756
3757  static void test_is_available_overflow() {
3758    // Reserve some memory.
3759    VirtualSpaceNode vsn(os::vm_allocation_granularity());
3760    assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
3761
3762    // Commit some memory.
3763    size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
3764    bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
3765    assert(expanded, "Failed to commit");
3766
3767    // Calculate a size that will overflow the virtual space size.
3768    void* virtual_space_max = (void*)(uintptr_t)-1;
3769    size_t bottom_to_max = pointer_delta(virtual_space_max, vsn.bottom(), 1);
3770    size_t overflow_size = bottom_to_max + BytesPerWord;
3771    size_t overflow_word_size = overflow_size / BytesPerWord;
3772
3773    // Check that is_available can handle the overflow.
3774    assert_is_available_negative(overflow_word_size);
3775  }
3776
3777  static void test_is_available() {
3778    TestVirtualSpaceNodeTest::test_is_available_positive();
3779    TestVirtualSpaceNodeTest::test_is_available_negative();
3780    TestVirtualSpaceNodeTest::test_is_available_overflow();
3781  }
3782};
3783
3784void TestVirtualSpaceNode_test() {
3785  TestVirtualSpaceNodeTest::test();
3786  TestVirtualSpaceNodeTest::test_is_available();
3787}
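
// These entry points are intended to be called from the internal VM test
// runner (e.g. a non-product VM run with -XX:+ExecuteInternalVMTests).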
3788
3789#endif
3790