metaspace.cpp revision 10159:832fc8bf51cb
1/*
2 * Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24#include "precompiled.hpp"
25#include "gc/shared/collectedHeap.hpp"
26#include "gc/shared/collectorPolicy.hpp"
27#include "gc/shared/gcLocker.hpp"
28#include "logging/log.hpp"
29#include "memory/allocation.hpp"
30#include "memory/binaryTreeDictionary.hpp"
31#include "memory/filemap.hpp"
32#include "memory/freeList.hpp"
33#include "memory/metachunk.hpp"
34#include "memory/metaspace.hpp"
35#include "memory/metaspaceGCThresholdUpdater.hpp"
36#include "memory/metaspaceShared.hpp"
37#include "memory/metaspaceTracer.hpp"
38#include "memory/resourceArea.hpp"
39#include "memory/universe.hpp"
40#include "runtime/atomic.inline.hpp"
41#include "runtime/globals.hpp"
42#include "runtime/init.hpp"
43#include "runtime/java.hpp"
44#include "runtime/mutex.hpp"
45#include "runtime/orderAccess.inline.hpp"
46#include "services/memTracker.hpp"
47#include "services/memoryService.hpp"
48#include "utilities/copy.hpp"
49#include "utilities/debug.hpp"
50#include "utilities/macros.hpp"
51
52typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary;
53typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;
54
55// Set this constant to enable slow integrity checking of the free chunk lists
56const bool metaspace_slow_verify = false;
57
58size_t const allocation_from_dictionary_limit = 4 * K;
59
60MetaWord* last_allocated = 0;
61
62size_t Metaspace::_compressed_class_space_size;
63const MetaspaceTracer* Metaspace::_tracer = NULL;
64
65// Used in declarations in SpaceManager and ChunkManager
66enum ChunkIndex {
67  ZeroIndex = 0,
68  SpecializedIndex = ZeroIndex,
69  SmallIndex = SpecializedIndex + 1,
70  MediumIndex = SmallIndex + 1,
71  HumongousIndex = MediumIndex + 1,
72  NumberOfFreeLists = 3,
73  NumberOfInUseLists = 4
74};
75
76enum ChunkSizes {    // in words.
77  ClassSpecializedChunk = 128,
78  SpecializedChunk = 128,
79  ClassSmallChunk = 256,
80  SmallChunk = 512,
81  ClassMediumChunk = 4 * K,
82  MediumChunk = 8 * K
83};
84
85static ChunkIndex next_chunk_index(ChunkIndex i) {
86  assert(i < NumberOfInUseLists, "Out of bound");
87  return (ChunkIndex) (i+1);
88}
89
90volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
91uint MetaspaceGC::_shrink_factor = 0;
92bool MetaspaceGC::_should_concurrent_collect = false;
93
94typedef class FreeList<Metachunk> ChunkList;
95
96// Manages the global free lists of chunks.
97class ChunkManager : public CHeapObj<mtInternal> {
98  friend class TestVirtualSpaceNodeTest;
99
100  // Free list of chunks of different sizes.
101  //   SpecializedChunk
102  //   SmallChunk
103  //   MediumChunk
104  //   (humongous chunks are kept in the _humongous_dictionary below)
105  ChunkList _free_chunks[NumberOfFreeLists];
106
107  //   HumongousChunk
108  ChunkTreeDictionary _humongous_dictionary;
109
110  // Totals for the chunks on all free lists owned by this ChunkManager
111  size_t _free_chunks_total;
112  size_t _free_chunks_count;
113
114  void dec_free_chunks_total(size_t v) {
115    assert(_free_chunks_count > 0 &&
116             _free_chunks_total > 0,
117             "About to go negative");
118    Atomic::add_ptr(-1, &_free_chunks_count);
119    jlong minus_v = (jlong) - (jlong) v;
120    Atomic::add_ptr(minus_v, &_free_chunks_total);
121  }
122
123  // Debug support
124
125  size_t sum_free_chunks();
126  size_t sum_free_chunks_count();
127
128  void locked_verify_free_chunks_total();
129  void slow_locked_verify_free_chunks_total() {
130    if (metaspace_slow_verify) {
131      locked_verify_free_chunks_total();
132    }
133  }
134  void locked_verify_free_chunks_count();
135  void slow_locked_verify_free_chunks_count() {
136    if (metaspace_slow_verify) {
137      locked_verify_free_chunks_count();
138    }
139  }
140  void verify_free_chunks_count();
141
142 public:
143
144  ChunkManager(size_t specialized_size, size_t small_size, size_t medium_size)
145      : _free_chunks_total(0), _free_chunks_count(0) {
146    _free_chunks[SpecializedIndex].set_size(specialized_size);
147    _free_chunks[SmallIndex].set_size(small_size);
148    _free_chunks[MediumIndex].set_size(medium_size);
149  }
150
151  // Allocate (remove) a chunk from the global freelist.
152  Metachunk* chunk_freelist_allocate(size_t word_size);
153
154  // Map a size to a list index assuming that there are lists
155  // for special, small, medium, and humongous chunks.
156  static ChunkIndex list_index(size_t size);
157
158  // Remove the chunk from its freelist.  It is
159  // expected to be on one of the _free_chunks[] lists.
160  void remove_chunk(Metachunk* chunk);
161
162  // Add the simple linked list of chunks to the freelist of chunks
163  // of type index.
164  void return_chunks(ChunkIndex index, Metachunk* chunks);
165
166  // Total of the space in the free chunks list
167  size_t free_chunks_total_words();
168  size_t free_chunks_total_bytes();
169
170  // Number of chunks in the free chunks list
171  size_t free_chunks_count();
172
173  void inc_free_chunks_total(size_t v, size_t count = 1) {
174    Atomic::add_ptr(count, &_free_chunks_count);
175    Atomic::add_ptr(v, &_free_chunks_total);
176  }
177  ChunkTreeDictionary* humongous_dictionary() {
178    return &_humongous_dictionary;
179  }
180
181  ChunkList* free_chunks(ChunkIndex index);
182
183  // Returns the list for the given chunk word size.
184  ChunkList* find_free_chunks_list(size_t word_size);
185
186  // Remove from a list by size.  Selects list based on size of chunk.
187  Metachunk* free_chunks_get(size_t chunk_word_size);
188
189#define index_bounds_check(index)                                         \
190  assert(index == SpecializedIndex ||                                     \
191         index == SmallIndex ||                                           \
192         index == MediumIndex ||                                          \
193         index == HumongousIndex, "Bad index: %d", (int) index)
194
195  size_t num_free_chunks(ChunkIndex index) const {
196    index_bounds_check(index);
197
198    if (index == HumongousIndex) {
199      return _humongous_dictionary.total_free_blocks();
200    }
201
202    ssize_t count = _free_chunks[index].count();
203    return count == -1 ? 0 : (size_t) count;
204  }
205
206  size_t size_free_chunks_in_bytes(ChunkIndex index) const {
207    index_bounds_check(index);
208
209    size_t word_size = 0;
210    if (index == HumongousIndex) {
211      word_size = _humongous_dictionary.total_size();
212    } else {
213      const size_t size_per_chunk_in_words = _free_chunks[index].size();
214      word_size = size_per_chunk_in_words * num_free_chunks(index);
215    }
216
217    return word_size * BytesPerWord;
218  }
219
220  MetaspaceChunkFreeListSummary chunk_free_list_summary() const {
221    return MetaspaceChunkFreeListSummary(num_free_chunks(SpecializedIndex),
222                                         num_free_chunks(SmallIndex),
223                                         num_free_chunks(MediumIndex),
224                                         num_free_chunks(HumongousIndex),
225                                         size_free_chunks_in_bytes(SpecializedIndex),
226                                         size_free_chunks_in_bytes(SmallIndex),
227                                         size_free_chunks_in_bytes(MediumIndex),
228                                         size_free_chunks_in_bytes(HumongousIndex));
229  }
230
231  // Debug support
232  void verify();
233  void slow_verify() {
234    if (metaspace_slow_verify) {
235      verify();
236    }
237  }
238  void locked_verify();
239  void slow_locked_verify() {
240    if (metaspace_slow_verify) {
241      locked_verify();
242    }
243  }
244  void verify_free_chunks_total();
245
246  void locked_print_free_chunks(outputStream* st);
247  void locked_print_sum_free_chunks(outputStream* st);
248
249  void print_on(outputStream* st) const;
250};
251
252// Used to manage the free list of Metablocks (a block corresponds
253// to the allocation of a quantum of metadata).
254class BlockFreelist VALUE_OBJ_CLASS_SPEC {
255  BlockTreeDictionary* const _dictionary;
256
257  // Only allocate and split from freelist if the size of the allocation
258  // is at least 1/4th the size of the available block.
259  const static int WasteMultiplier = 4;
260
261  // Accessors
262  BlockTreeDictionary* dictionary() const { return _dictionary; }
263
264 public:
265  BlockFreelist();
266  ~BlockFreelist();
267
268  // Get and return a block to the free list
269  MetaWord* get_block(size_t word_size);
270  void return_block(MetaWord* p, size_t word_size);
271
272  size_t total_size() { return dictionary()->total_size(); }
273
274  void print_on(outputStream* st) const;
275};
276
277// A VirtualSpaceList node.
278class VirtualSpaceNode : public CHeapObj<mtClass> {
279  friend class VirtualSpaceList;
280
281  // Link to next VirtualSpaceNode
282  VirtualSpaceNode* _next;
283
284  // The entire region reserved for this VirtualSpace
285  MemRegion _reserved;
286  ReservedSpace _rs;
287  VirtualSpace _virtual_space;
288  MetaWord* _top;
289  // count of chunks contained in this VirtualSpace
290  uintx _container_count;
291
292  // Convenience functions to access the _virtual_space
293  char* low()  const { return virtual_space()->low(); }
294  char* high() const { return virtual_space()->high(); }
295
296  // The first Metachunk will be allocated at the bottom of the
297  // VirtualSpace
298  Metachunk* first_chunk() { return (Metachunk*) bottom(); }
299
300  // Committed but unused space in the virtual space
301  size_t free_words_in_vs() const;
302 public:
303
304  VirtualSpaceNode(size_t byte_size);
305  VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
306  ~VirtualSpaceNode();
307
308  // Convenience functions for logical bottom and end
309  MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
310  MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
311
312  bool contains(const void* ptr) { return ptr >= low() && ptr < high(); }
313
314  size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
315  size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }
316
317  bool is_pre_committed() const { return _virtual_space.special(); }
318
319  // _top is the address of the next available space in _virtual_space.
320  // Accessors
321  VirtualSpaceNode* next() { return _next; }
322  void set_next(VirtualSpaceNode* v) { _next = v; }
323
324  void set_reserved(MemRegion const v) { _reserved = v; }
325  void set_top(MetaWord* v) { _top = v; }
326
327  // Accessors
328  MemRegion* reserved() { return &_reserved; }
329  VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }
330
331  // Returns true if "word_size" is available in the VirtualSpace
332  bool is_available(size_t word_size) { return word_size <= pointer_delta(end(), _top, sizeof(MetaWord)); }
333
334  MetaWord* top() const { return _top; }
335  void inc_top(size_t word_size) { _top += word_size; }
336
337  uintx container_count() { return _container_count; }
338  void inc_container_count();
339  void dec_container_count();
340#ifdef ASSERT
341  uintx container_count_slow();
342  void verify_container_count();
343#endif
344
345  // used and capacity in this single entry in the list
346  size_t used_words_in_vs() const;
347  size_t capacity_words_in_vs() const;
348
349  bool initialize();
350
351  // get space from the virtual space
352  Metachunk* take_from_committed(size_t chunk_word_size);
353
354  // Allocate a chunk from the virtual space and return it.
355  Metachunk* get_chunk_vs(size_t chunk_word_size);
356
357  // Expands/shrinks the committed space in a virtual space.  Delegates
358  // to VirtualSpace
359  bool expand_by(size_t min_words, size_t preferred_words);
360
361  // In preparation for deleting this node, remove all the chunks
362  // in the node from any freelist.
363  void purge(ChunkManager* chunk_manager);
364
365  // If an allocation doesn't fit in the current node a new node is created.
366  // Allocate chunks out of the remaining committed space in this node
367  // to avoid wasting that memory.
368  // The leftover committed space can always be carved up exactly because
369  // all chunk sizes are multiples of the smallest chunk size.
370  void retire(ChunkManager* chunk_manager);
371
372#ifdef ASSERT
373  // Debug support
374  void mangle();
375#endif
376
377  void print_on(outputStream* st) const;
378};
379
380#define assert_is_ptr_aligned(ptr, alignment) \
381  assert(is_ptr_aligned(ptr, alignment),      \
382         PTR_FORMAT " is not aligned to "     \
383         SIZE_FORMAT, p2i(ptr), alignment)
384
385#define assert_is_size_aligned(size, alignment) \
386  assert(is_size_aligned(size, alignment),      \
387         SIZE_FORMAT " is not aligned to "      \
388         SIZE_FORMAT, size, alignment)
389
390
391// Decide if large pages should be committed when the memory is reserved.
392static bool should_commit_large_pages_when_reserving(size_t bytes) {
393  if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
394    size_t words = bytes / BytesPerWord;
395    bool is_class = false; // We never reserve large pages for the class space.
396    if (MetaspaceGC::can_expand(words, is_class) &&
397        MetaspaceGC::allowed_expansion() >= words) {
398      return true;
399    }
400  }
401
402  return false;
403}
404
405// 'bytes' is the size of the associated VirtualSpace.
406VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
407  assert_is_size_aligned(bytes, Metaspace::reserve_alignment());
408
409#if INCLUDE_CDS
410  // This allocates memory with mmap.  For DumpSharedSpaces, try to reserve
411  // a configurable address, generally at the top of the Java heap so other
412  // memory addresses don't conflict.
413  if (DumpSharedSpaces) {
414    bool large_pages = false; // No large pages when dumping the CDS archive.
415    char* shared_base = (char*)align_ptr_up((char*)SharedBaseAddress, Metaspace::reserve_alignment());
416
417    _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base);
418    if (_rs.is_reserved()) {
419      assert(shared_base == 0 || _rs.base() == shared_base, "should match");
420    } else {
421      // Get a mmap region anywhere if the SharedBaseAddress fails.
422      _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
423    }
424    MetaspaceShared::initialize_shared_rs(&_rs);
425  } else
426#endif
427  {
428    bool large_pages = should_commit_large_pages_when_reserving(bytes);
429
430    _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
431  }
432
433  if (_rs.is_reserved()) {
434    assert(_rs.base() != NULL, "Catch if we get a NULL address");
435    assert(_rs.size() != 0, "Catch if we get a 0 size");
436    assert_is_ptr_aligned(_rs.base(), Metaspace::reserve_alignment());
437    assert_is_size_aligned(_rs.size(), Metaspace::reserve_alignment());
438
439    MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
440  }
441}
442
443void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
444  Metachunk* chunk = first_chunk();
445  Metachunk* invalid_chunk = (Metachunk*) top();
446  while (chunk < invalid_chunk ) {
447    assert(chunk->is_tagged_free(), "Should be tagged free");
448    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
449    chunk_manager->remove_chunk(chunk);
450    assert(chunk->next() == NULL &&
451           chunk->prev() == NULL,
452           "Was not removed from its list");
453    chunk = (Metachunk*) next;
454  }
455}
456
457#ifdef ASSERT
458uintx VirtualSpaceNode::container_count_slow() {
459  uintx count = 0;
460  Metachunk* chunk = first_chunk();
461  Metachunk* invalid_chunk = (Metachunk*) top();
462  while (chunk < invalid_chunk ) {
463    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
464    // Don't count the chunks on the free lists.  Those are
465    // still part of the VirtualSpaceNode but not currently
466    // counted.
467    if (!chunk->is_tagged_free()) {
468      count++;
469    }
470    chunk = (Metachunk*) next;
471  }
472  return count;
473}
474#endif
475
476// List of VirtualSpaces for metadata allocation.
477class VirtualSpaceList : public CHeapObj<mtClass> {
478  friend class VirtualSpaceNode;
479
480  enum VirtualSpaceSizes {
481    VirtualSpaceSize = 256 * K
482  };
483
484  // Head of the list
485  VirtualSpaceNode* _virtual_space_list;
486  // virtual space currently being used for allocations
487  VirtualSpaceNode* _current_virtual_space;
488
489  // Is this VirtualSpaceList used for the compressed class space
490  bool _is_class;
491
492  // Sum of reserved and committed memory in the virtual spaces
493  size_t _reserved_words;
494  size_t _committed_words;
495
496  // Number of virtual spaces
497  size_t _virtual_space_count;
498
499  ~VirtualSpaceList();
500
501  VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }
502
503  void set_virtual_space_list(VirtualSpaceNode* v) {
504    _virtual_space_list = v;
505  }
506  void set_current_virtual_space(VirtualSpaceNode* v) {
507    _current_virtual_space = v;
508  }
509
510  void link_vs(VirtualSpaceNode* new_entry);
511
512  // Get another virtual space and add it to the list.  This
513  // is typically prompted by a failed attempt to allocate a chunk
514  // and is typically followed by the allocation of a chunk.
515  bool create_new_virtual_space(size_t vs_word_size);
516
517  // Chunk up the unused committed space in the current
518  // virtual space and add the chunks to the free list.
519  void retire_current_virtual_space();
520
521 public:
522  VirtualSpaceList(size_t word_size);
523  VirtualSpaceList(ReservedSpace rs);
524
525  size_t free_bytes();
526
527  Metachunk* get_new_chunk(size_t word_size,
528                           size_t grow_chunks_by_words,
529                           size_t medium_chunk_bunch);
530
531  bool expand_node_by(VirtualSpaceNode* node,
532                      size_t min_words,
533                      size_t preferred_words);
534
535  bool expand_by(size_t min_words,
536                 size_t preferred_words);
537
538  VirtualSpaceNode* current_virtual_space() {
539    return _current_virtual_space;
540  }
541
542  bool is_class() const { return _is_class; }
543
544  bool initialization_succeeded() { return _virtual_space_list != NULL; }
545
546  size_t reserved_words()  { return _reserved_words; }
547  size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
548  size_t committed_words() { return _committed_words; }
549  size_t committed_bytes() { return committed_words() * BytesPerWord; }
550
551  void inc_reserved_words(size_t v);
552  void dec_reserved_words(size_t v);
553  void inc_committed_words(size_t v);
554  void dec_committed_words(size_t v);
555  void inc_virtual_space_count();
556  void dec_virtual_space_count();
557
558  bool contains(const void* ptr);
559
560  // Unlink empty VirtualSpaceNodes and free them.
561  void purge(ChunkManager* chunk_manager);
562
563  void print_on(outputStream* st) const;
564
565  class VirtualSpaceListIterator : public StackObj {
566    VirtualSpaceNode* _virtual_spaces;
567   public:
568    VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
569      _virtual_spaces(virtual_spaces) {}
570
571    bool repeat() {
572      return _virtual_spaces != NULL;
573    }
574
575    VirtualSpaceNode* get_next() {
576      VirtualSpaceNode* result = _virtual_spaces;
577      if (_virtual_spaces != NULL) {
578        _virtual_spaces = _virtual_spaces->next();
579      }
580      return result;
581    }
582  };
583};
584
585class Metadebug : AllStatic {
586  // Debugging support for Metaspaces
587  static int _allocation_fail_alot_count;
588
589 public:
590
591  static void init_allocation_fail_alot_count();
592#ifdef ASSERT
593  static bool test_metadata_failure();
594#endif
595};
596
597int Metadebug::_allocation_fail_alot_count = 0;
598
599//  SpaceManager - used by Metaspace to handle allocations
600class SpaceManager : public CHeapObj<mtClass> {
601  friend class Metaspace;
602  friend class Metadebug;
603
604 private:
605
606  // protects allocations
607  Mutex* const _lock;
608
609  // Type of metadata allocated.
610  Metaspace::MetadataType _mdtype;
611
612  // List of chunks in use by this SpaceManager.  Allocations
613  // are done from the current chunk.  The list is used for deallocating
614  // chunks when the SpaceManager is freed.
615  Metachunk* _chunks_in_use[NumberOfInUseLists];
616  Metachunk* _current_chunk;
617
618  // Maximum number of small chunks to allocate to a SpaceManager
619  static uint const _small_chunk_limit;
620
621  // Sum of all space in allocated chunks
622  size_t _allocated_blocks_words;
623
624  // Sum of all allocated chunks
625  size_t _allocated_chunks_words;
626  size_t _allocated_chunks_count;
627
628  // Free lists of blocks are per SpaceManager since they
629  // are assumed to be in chunks in use by the SpaceManager
630  // and all chunks in use by a SpaceManager are freed when
631  // the class loader using the SpaceManager is collected.
632  BlockFreelist _block_freelists;
633
634  // protects virtualspace and chunk expansions
635  static const char*  _expand_lock_name;
636  static const int    _expand_lock_rank;
637  static Mutex* const _expand_lock;
638
639 private:
640  // Accessors
641  Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
642  void set_chunks_in_use(ChunkIndex index, Metachunk* v) {
643    _chunks_in_use[index] = v;
644  }
645
646  BlockFreelist* block_freelists() const {
647    return (BlockFreelist*) &_block_freelists;
648  }
649
650  Metaspace::MetadataType mdtype() { return _mdtype; }
651
652  VirtualSpaceList* vs_list()   const { return Metaspace::get_space_list(_mdtype); }
653  ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }
654
655  Metachunk* current_chunk() const { return _current_chunk; }
656  void set_current_chunk(Metachunk* v) {
657    _current_chunk = v;
658  }
659
660  Metachunk* find_current_chunk(size_t word_size);
661
662  // Add chunk to the list of chunks in use
663  void add_chunk(Metachunk* v, bool make_current);
664  void retire_current_chunk();
665
666  Mutex* lock() const { return _lock; }
667
668  const char* chunk_size_name(ChunkIndex index) const;
669
670 protected:
671  void initialize();
672
673 public:
674  SpaceManager(Metaspace::MetadataType mdtype,
675               Mutex* lock);
676  ~SpaceManager();
677
678  enum ChunkMultiples {
679    MediumChunkMultiple = 4
680  };
681
682  bool is_class() { return _mdtype == Metaspace::ClassType; }
683
684  // Accessors
685  size_t specialized_chunk_size() { return (size_t) is_class() ? ClassSpecializedChunk : SpecializedChunk; }
686  size_t small_chunk_size()       { return (size_t) is_class() ? ClassSmallChunk : SmallChunk; }
687  size_t medium_chunk_size()      { return (size_t) is_class() ? ClassMediumChunk : MediumChunk; }
688  size_t medium_chunk_bunch()     { return medium_chunk_size() * MediumChunkMultiple; }
689
690  size_t smallest_chunk_size()  { return specialized_chunk_size(); }
691
692  size_t allocated_blocks_words() const { return _allocated_blocks_words; }
693  size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
694  size_t allocated_chunks_words() const { return _allocated_chunks_words; }
695  size_t allocated_chunks_bytes() const { return _allocated_chunks_words * BytesPerWord; }
696  size_t allocated_chunks_count() const { return _allocated_chunks_count; }
697
698  bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }
699
700  static Mutex* expand_lock() { return _expand_lock; }
701
702  // Increment the per Metaspace and global running sums for Metachunks
703  // by the given size.  This is used when a Metachunk is added to
704  // the in-use list.
705  void inc_size_metrics(size_t words);
706  // Increment the per Metaspace and global running sums for Metablocks by the given
707  // size.  This is used when a Metablock is allocated.
708  void inc_used_metrics(size_t words);
709  // Delete the portion of the running sums for this SpaceManager. That is,
710  // the global running sums for the Metachunks and Metablocks are
711  // decremented for all the Metachunks in-use by this SpaceManager.
712  void dec_total_from_size_metrics();
713
714  // Set the sizes for the initial chunks.
715  void get_initial_chunk_sizes(Metaspace::MetaspaceType type,
716                               size_t* chunk_word_size,
717                               size_t* class_chunk_word_size);
718
719  size_t sum_capacity_in_chunks_in_use() const;
720  size_t sum_used_in_chunks_in_use() const;
721  size_t sum_free_in_chunks_in_use() const;
722  size_t sum_waste_in_chunks_in_use() const;
723  size_t sum_waste_in_chunks_in_use(ChunkIndex index ) const;
724
725  size_t sum_count_in_chunks_in_use();
726  size_t sum_count_in_chunks_in_use(ChunkIndex i);
727
728  Metachunk* get_new_chunk(size_t word_size, size_t grow_chunks_by_words);
729
730  // Block allocation and deallocation.
731  // Allocates a block from the current chunk
732  MetaWord* allocate(size_t word_size);
733  // Allocates a block from a small chunk
734  MetaWord* get_small_chunk_and_allocate(size_t word_size);
735
736  // Helper for allocations
737  MetaWord* allocate_work(size_t word_size);
738
739  // Returns a block to the per manager freelist
740  void deallocate(MetaWord* p, size_t word_size);
741
742  // Based on the allocation size and a minimum chunk size,
743  // return the chunk size to request (for expanding space for chunk allocation).
744  size_t calc_chunk_size(size_t allocation_word_size);
745
746  // Called when an allocation from the current chunk fails.
747  // Gets a new chunk (may require getting a new virtual space),
748  // and allocates from that chunk.
749  MetaWord* grow_and_allocate(size_t word_size);
750
751  // Notify memory usage to MemoryService.
752  void track_metaspace_memory_usage();
753
754  // debugging support.
755
756  void dump(outputStream* const out) const;
757  void print_on(outputStream* st) const;
758  void locked_print_chunks_in_use_on(outputStream* st) const;
759
760  void verify();
761  void verify_chunk_size(Metachunk* chunk);
762  NOT_PRODUCT(void mangle_freed_chunks();)
763#ifdef ASSERT
764  void verify_allocated_blocks_words();
765#endif
766
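  // Convert a requested word size into the raw size actually taken out of a chunk:
  // at least large enough to be reinstalled later as a Metablock on the block
  // freelist (see BlockFreelist::return_block), rounded up to the Metachunk
  // object alignment.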
767  size_t get_raw_word_size(size_t word_size) {
768    size_t byte_size = word_size * BytesPerWord;
769
770    size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
771    raw_bytes_size = align_size_up(raw_bytes_size, Metachunk::object_alignment());
772
773    size_t raw_word_size = raw_bytes_size / BytesPerWord;
774    assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
775
776    return raw_word_size;
777  }
778};
779
780uint const SpaceManager::_small_chunk_limit = 4;
781
782const char* SpaceManager::_expand_lock_name =
783  "SpaceManager chunk allocation lock";
784const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
785Mutex* const SpaceManager::_expand_lock =
786  new Mutex(SpaceManager::_expand_lock_rank,
787            SpaceManager::_expand_lock_name,
788            Mutex::_allow_vm_block_flag,
789            Monitor::_safepoint_check_never);
790
791void VirtualSpaceNode::inc_container_count() {
792  assert_lock_strong(SpaceManager::expand_lock());
793  _container_count++;
794  DEBUG_ONLY(verify_container_count();)
795}
796
797void VirtualSpaceNode::dec_container_count() {
798  assert_lock_strong(SpaceManager::expand_lock());
799  _container_count--;
800}
801
802#ifdef ASSERT
803void VirtualSpaceNode::verify_container_count() {
804  assert(_container_count == container_count_slow(),
805         "Inconsistency in container_count _container_count " UINTX_FORMAT
806         " container_count_slow() " UINTX_FORMAT, _container_count, container_count_slow());
807}
808#endif
809
810// BlockFreelist methods
811
812BlockFreelist::BlockFreelist() : _dictionary(new BlockTreeDictionary()) {}
813
814BlockFreelist::~BlockFreelist() {
815  LogHandle(gc, metaspace, freelist) log;
816  if (log.is_trace()) {
817    ResourceMark rm;
818    dictionary()->print_free_lists(log.trace_stream());
819  }
820  delete _dictionary;
821}
822
823void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
824  Metablock* free_chunk = ::new (p) Metablock(word_size);
825  dictionary()->return_chunk(free_chunk);
826}
827
828MetaWord* BlockFreelist::get_block(size_t word_size) {
829  if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
830    // Dark matter.  Too small for dictionary.
831    return NULL;
832  }
833
834  Metablock* free_block =
835    dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::atLeast);
836  if (free_block == NULL) {
837    return NULL;
838  }
839
840  const size_t block_size = free_block->size();
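  // The dictionary hands back a block of at least word_size words.  If the block
  // is more than WasteMultiplier times larger than the request, using it would
  // waste most of it, so put it back and report failure instead.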
841  if (block_size > WasteMultiplier * word_size) {
842    return_block((MetaWord*)free_block, block_size);
843    return NULL;
844  }
845
846  MetaWord* new_block = (MetaWord*)free_block;
847  assert(block_size >= word_size, "Incorrect size of block from freelist");
848  const size_t unused = block_size - word_size;
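  // If the unused tail is itself large enough to be tracked by the dictionary,
  // return it as a separate block; smaller remainders simply stay part of the
  // block handed out.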
849  if (unused >= TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
850    return_block(new_block + word_size, unused);
851  }
852
853  return new_block;
854}
855
856void BlockFreelist::print_on(outputStream* st) const {
857  dictionary()->print_free_lists(st);
858}
859
860// VirtualSpaceNode methods
861
862VirtualSpaceNode::~VirtualSpaceNode() {
863  _rs.release();
864#ifdef ASSERT
865  size_t word_size = sizeof(*this) / BytesPerWord;
866  Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
867#endif
868}
869
870size_t VirtualSpaceNode::used_words_in_vs() const {
871  return pointer_delta(top(), bottom(), sizeof(MetaWord));
872}
873
874// Space committed in the VirtualSpace
875size_t VirtualSpaceNode::capacity_words_in_vs() const {
876  return pointer_delta(end(), bottom(), sizeof(MetaWord));
877}
878
879size_t VirtualSpaceNode::free_words_in_vs() const {
880  return pointer_delta(end(), top(), sizeof(MetaWord));
881}
882
883// Allocates the chunk from the virtual space only.
884// This interface is also used internally for debugging.  Not all
885// chunks removed here are necessarily used for allocation.
886Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
887  // Bottom of the new chunk
888  MetaWord* chunk_limit = top();
889  assert(chunk_limit != NULL, "Not safe to call this method");
890
891  // The virtual spaces are always expanded by the
892  // commit granularity to enforce the following condition.
893  // Without this the is_available check will not work correctly.
894  assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
895      "The committed memory doesn't match the expanded memory.");
896
897  if (!is_available(chunk_word_size)) {
898    LogHandle(gc, metaspace, freelist) log;
899    log.debug("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
900    // Dump some information about the virtual space that is nearly full
901    ResourceMark rm;
902    print_on(log.debug_stream());
903    return NULL;
904  }
905
906  // Take the space  (bump top on the current virtual space).
907  inc_top(chunk_word_size);
908
909  // Initialize the chunk
910  Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this);
911  return result;
912}
913
914
915// Expand the virtual space (commit more of the reserved space)
916bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
917  size_t min_bytes = min_words * BytesPerWord;
918  size_t preferred_bytes = preferred_words * BytesPerWord;
919
920  size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();
921
922  if (uncommitted < min_bytes) {
923    return false;
924  }
925
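  // Commit the preferred amount if possible, but never more than what remains
  // uncommitted in this node.  The min_bytes requirement was already checked above.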
926  size_t commit = MIN2(preferred_bytes, uncommitted);
927  bool result = virtual_space()->expand_by(commit, false);
928
929  assert(result, "Failed to commit memory");
930
931  return result;
932}
933
934Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
935  assert_lock_strong(SpaceManager::expand_lock());
936  Metachunk* result = take_from_committed(chunk_word_size);
937  if (result != NULL) {
938    inc_container_count();
939  }
940  return result;
941}
942
943bool VirtualSpaceNode::initialize() {
944
945  if (!_rs.is_reserved()) {
946    return false;
947  }
948
949  // These are necessary restrictions to make sure that the virtual space always
950  // grows in steps of Metaspace::commit_alignment(). If both base and size are
951  // aligned only the middle alignment of the VirtualSpace is used.
952  assert_is_ptr_aligned(_rs.base(), Metaspace::commit_alignment());
953  assert_is_size_aligned(_rs.size(), Metaspace::commit_alignment());
954
955  // ReservedSpaces marked as special will have the entire memory
956  // pre-committed. Setting a committed size will make sure that
957  // committed_size and actual_committed_size agree.
958  size_t pre_committed_size = _rs.special() ? _rs.size() : 0;
959
960  bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
961                                            Metaspace::commit_alignment());
962  if (result) {
963    assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
964        "Checking that the pre-committed memory was registered by the VirtualSpace");
965
966    set_top((MetaWord*)virtual_space()->low());
967    set_reserved(MemRegion((HeapWord*)_rs.base(),
968                 (HeapWord*)(_rs.base() + _rs.size())));
969
970    assert(reserved()->start() == (HeapWord*) _rs.base(),
971           "Reserved start was not set properly " PTR_FORMAT
972           " != " PTR_FORMAT, p2i(reserved()->start()), p2i(_rs.base()));
973    assert(reserved()->word_size() == _rs.size() / BytesPerWord,
974           "Reserved size was not set properly " SIZE_FORMAT
975           " != " SIZE_FORMAT, reserved()->word_size(),
976           _rs.size() / BytesPerWord);
977  }
978
979  return result;
980}
981
982void VirtualSpaceNode::print_on(outputStream* st) const {
983  size_t used = used_words_in_vs();
984  size_t capacity = capacity_words_in_vs();
985  VirtualSpace* vs = virtual_space();
986  st->print_cr("   space @ " PTR_FORMAT " " SIZE_FORMAT "K, " SIZE_FORMAT_W(3) "%% used "
987           "[" PTR_FORMAT ", " PTR_FORMAT ", "
988           PTR_FORMAT ", " PTR_FORMAT ")",
989           p2i(vs), capacity / K,
990           capacity == 0 ? 0 : used * 100 / capacity,
991           p2i(bottom()), p2i(top()), p2i(end()),
992           p2i(vs->high_boundary()));
993}
994
995#ifdef ASSERT
996void VirtualSpaceNode::mangle() {
997  size_t word_size = capacity_words_in_vs();
998  Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
999}
1000#endif // ASSERT
1001
1002// VirtualSpaceList methods
1003// Space allocated from the VirtualSpace
1004
1005VirtualSpaceList::~VirtualSpaceList() {
1006  VirtualSpaceListIterator iter(virtual_space_list());
1007  while (iter.repeat()) {
1008    VirtualSpaceNode* vsl = iter.get_next();
1009    delete vsl;
1010  }
1011}
1012
1013void VirtualSpaceList::inc_reserved_words(size_t v) {
1014  assert_lock_strong(SpaceManager::expand_lock());
1015  _reserved_words = _reserved_words + v;
1016}
1017void VirtualSpaceList::dec_reserved_words(size_t v) {
1018  assert_lock_strong(SpaceManager::expand_lock());
1019  _reserved_words = _reserved_words - v;
1020}
1021
1022#define assert_committed_below_limit()                        \
1023  assert(MetaspaceAux::committed_bytes() <= MaxMetaspaceSize, \
1024         "Too much committed memory. Committed: " SIZE_FORMAT \
1025         " limit (MaxMetaspaceSize): " SIZE_FORMAT,           \
1026         MetaspaceAux::committed_bytes(), MaxMetaspaceSize);
1027
1028void VirtualSpaceList::inc_committed_words(size_t v) {
1029  assert_lock_strong(SpaceManager::expand_lock());
1030  _committed_words = _committed_words + v;
1031
1032  assert_committed_below_limit();
1033}
1034void VirtualSpaceList::dec_committed_words(size_t v) {
1035  assert_lock_strong(SpaceManager::expand_lock());
1036  _committed_words = _committed_words - v;
1037
1038  assert_committed_below_limit();
1039}
1040
1041void VirtualSpaceList::inc_virtual_space_count() {
1042  assert_lock_strong(SpaceManager::expand_lock());
1043  _virtual_space_count++;
1044}
1045void VirtualSpaceList::dec_virtual_space_count() {
1046  assert_lock_strong(SpaceManager::expand_lock());
1047  _virtual_space_count--;
1048}
1049
1050void ChunkManager::remove_chunk(Metachunk* chunk) {
1051  size_t word_size = chunk->word_size();
1052  ChunkIndex index = list_index(word_size);
1053  if (index != HumongousIndex) {
1054    free_chunks(index)->remove_chunk(chunk);
1055  } else {
1056    humongous_dictionary()->remove_chunk(chunk);
1057  }
1058
1059  // Chunk is being removed from the chunks free list.
1060  dec_free_chunks_total(chunk->word_size());
1061}
1062
1063// Walk the list of VirtualSpaceNodes and delete
1064// nodes with a 0 container_count.  Remove Metachunks in
1065// the node from their respective freelists.
1066void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
1067  assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
1068  assert_lock_strong(SpaceManager::expand_lock());
1069  // Don't use a VirtualSpaceListIterator because this
1070  // list is being changed and a straightforward use of an iterator is not safe.
1071  VirtualSpaceNode* purged_vsl = NULL;
1072  VirtualSpaceNode* prev_vsl = virtual_space_list();
1073  VirtualSpaceNode* next_vsl = prev_vsl;
1074  while (next_vsl != NULL) {
1075    VirtualSpaceNode* vsl = next_vsl;
1076    next_vsl = vsl->next();
1077    // Don't free the current virtual space since it will likely
1078    // be needed soon.
1079    if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
1080      // Unlink it from the list
1081      if (prev_vsl == vsl) {
1082        // This is the case of the current node being the first node.
1083        assert(vsl == virtual_space_list(), "Expected to be the first node");
1084        set_virtual_space_list(vsl->next());
1085      } else {
1086        prev_vsl->set_next(vsl->next());
1087      }
1088
1089      vsl->purge(chunk_manager);
1090      dec_reserved_words(vsl->reserved_words());
1091      dec_committed_words(vsl->committed_words());
1092      dec_virtual_space_count();
1093      purged_vsl = vsl;
1094      delete vsl;
1095    } else {
1096      prev_vsl = vsl;
1097    }
1098  }
1099#ifdef ASSERT
1100  if (purged_vsl != NULL) {
1101    // List should be stable enough to use an iterator here.
1102    VirtualSpaceListIterator iter(virtual_space_list());
1103    while (iter.repeat()) {
1104      VirtualSpaceNode* vsl = iter.get_next();
1105      assert(vsl != purged_vsl, "Purge of vsl failed");
1106    }
1107  }
1108#endif
1109}
1110
1111
1112// This function looks at the mmap regions in the metaspace without locking.
1113// The chunks are added with store ordering and not deleted except at
1114// unloading time during a safepoint.
1115bool VirtualSpaceList::contains(const void* ptr) {
1116  // List should be stable enough to use an iterator here because removing virtual
1117  // space nodes is only allowed at a safepoint.
1118  VirtualSpaceListIterator iter(virtual_space_list());
1119  while (iter.repeat()) {
1120    VirtualSpaceNode* vsn = iter.get_next();
1121    if (vsn->contains(ptr)) {
1122      return true;
1123    }
1124  }
1125  return false;
1126}
1127
1128void VirtualSpaceList::retire_current_virtual_space() {
1129  assert_lock_strong(SpaceManager::expand_lock());
1130
1131  VirtualSpaceNode* vsn = current_virtual_space();
1132
1133  ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
1134                                  Metaspace::chunk_manager_metadata();
1135
1136  vsn->retire(cm);
1137}
1138
1139void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
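  // Carve the remaining committed space into free chunks, trying the largest
  // non-humongous chunk size first so as little as possible is left over.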
1140  for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
1141    ChunkIndex index = (ChunkIndex)i;
1142    size_t chunk_size = chunk_manager->free_chunks(index)->size();
1143
1144    while (free_words_in_vs() >= chunk_size) {
1145      DEBUG_ONLY(verify_container_count();)
1146      Metachunk* chunk = get_chunk_vs(chunk_size);
1147      assert(chunk != NULL, "allocation should have been successful");
1148
1149      chunk_manager->return_chunks(index, chunk);
1150      chunk_manager->inc_free_chunks_total(chunk_size);
1151      DEBUG_ONLY(verify_container_count();)
1152    }
1153  }
1154  assert(free_words_in_vs() == 0, "should be empty now");
1155}
1156
1157VirtualSpaceList::VirtualSpaceList(size_t word_size) :
1158                                   _is_class(false),
1159                                   _virtual_space_list(NULL),
1160                                   _current_virtual_space(NULL),
1161                                   _reserved_words(0),
1162                                   _committed_words(0),
1163                                   _virtual_space_count(0) {
1164  MutexLockerEx cl(SpaceManager::expand_lock(),
1165                   Mutex::_no_safepoint_check_flag);
1166  create_new_virtual_space(word_size);
1167}
1168
1169VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
1170                                   _is_class(true),
1171                                   _virtual_space_list(NULL),
1172                                   _current_virtual_space(NULL),
1173                                   _reserved_words(0),
1174                                   _committed_words(0),
1175                                   _virtual_space_count(0) {
1176  MutexLockerEx cl(SpaceManager::expand_lock(),
1177                   Mutex::_no_safepoint_check_flag);
1178  VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
1179  bool succeeded = class_entry->initialize();
1180  if (succeeded) {
1181    link_vs(class_entry);
1182  }
1183}
1184
1185size_t VirtualSpaceList::free_bytes() {
1186  return virtual_space_list()->free_words_in_vs() * BytesPerWord;
1187}
1188
1189// Allocate another meta virtual space and add it to the list.
1190bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
1191  assert_lock_strong(SpaceManager::expand_lock());
1192
1193  if (is_class()) {
1194    assert(false, "We currently don't support more than one VirtualSpace for"
1195                  " the compressed class space. The initialization of the"
1196                  " CCS uses another code path and should not hit this path.");
1197    return false;
1198  }
1199
1200  if (vs_word_size == 0) {
1201    assert(false, "vs_word_size should always be at least _reserve_alignment large.");
1202    return false;
1203  }
1204
1205  // Reserve the space
1206  size_t vs_byte_size = vs_word_size * BytesPerWord;
1207  assert_is_size_aligned(vs_byte_size, Metaspace::reserve_alignment());
1208
1209  // Allocate the meta virtual space and initialize it.
1210  VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
1211  if (!new_entry->initialize()) {
1212    delete new_entry;
1213    return false;
1214  } else {
1215    assert(new_entry->reserved_words() == vs_word_size,
1216        "Reserved memory size differs from requested memory size");
1217    // ensure lock-free iteration sees fully initialized node
1218    OrderAccess::storestore();
1219    link_vs(new_entry);
1220    return true;
1221  }
1222}
1223
1224void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
1225  if (virtual_space_list() == NULL) {
1226    set_virtual_space_list(new_entry);
1227  } else {
1228    current_virtual_space()->set_next(new_entry);
1229  }
1230  set_current_virtual_space(new_entry);
1231  inc_reserved_words(new_entry->reserved_words());
1232  inc_committed_words(new_entry->committed_words());
1233  inc_virtual_space_count();
1234#ifdef ASSERT
1235  new_entry->mangle();
1236#endif
1237  if (develop_log_is_enabled(Trace, gc, metaspace)) {
1238    LogHandle(gc, metaspace) log;
1239    VirtualSpaceNode* vsl = current_virtual_space();
1240    ResourceMark rm;
1241    vsl->print_on(log.trace_stream());
1242  }
1243}
1244
1245bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
1246                                      size_t min_words,
1247                                      size_t preferred_words) {
1248  size_t before = node->committed_words();
1249
1250  bool result = node->expand_by(min_words, preferred_words);
1251
1252  size_t after = node->committed_words();
1253
1254  // after and before can be the same if the memory was pre-committed.
1255  assert(after >= before, "Inconsistency");
1256  inc_committed_words(after - before);
1257
1258  return result;
1259}
1260
1261bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
1262  assert_is_size_aligned(min_words,       Metaspace::commit_alignment_words());
1263  assert_is_size_aligned(preferred_words, Metaspace::commit_alignment_words());
1264  assert(min_words <= preferred_words, "Invalid arguments");
1265
1266  if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
1267    return  false;
1268  }
1269
1270  size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
1271  if (allowed_expansion_words < min_words) {
1272    return false;
1273  }
1274
1275  size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);
1276
1277  // Commit more memory from the current virtual space.
1278  bool vs_expanded = expand_node_by(current_virtual_space(),
1279                                    min_words,
1280                                    max_expansion_words);
1281  if (vs_expanded) {
1282    return true;
1283  }
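  // The current virtual space could not be expanded enough.  Chunk up its leftover
  // committed space and return those chunks to the free lists before switching to
  // a new virtual space.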
1284  retire_current_virtual_space();
1285
1286  // Get another virtual space.
1287  size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
1288  grow_vs_words = align_size_up(grow_vs_words, Metaspace::reserve_alignment_words());
1289
1290  if (create_new_virtual_space(grow_vs_words)) {
1291    if (current_virtual_space()->is_pre_committed()) {
1292      // The memory was pre-committed, so we are done here.
1293      assert(min_words <= current_virtual_space()->committed_words(),
1294          "The new VirtualSpace was pre-committed, so it"
1295          "should be large enough to fit the alloc request.");
1296      return true;
1297    }
1298
1299    return expand_node_by(current_virtual_space(),
1300                          min_words,
1301                          max_expansion_words);
1302  }
1303
1304  return false;
1305}
1306
1307Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
1308                                           size_t grow_chunks_by_words,
1309                                           size_t medium_chunk_bunch) {
1310
1311  // Allocate a chunk out of the current virtual space.
1312  Metachunk* next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
1313
1314  if (next != NULL) {
1315    return next;
1316  }
1317
1318  // The expand amount is currently only determined by the requested sizes
1319  // and not how much committed memory is left in the current virtual space.
1320
1321  size_t min_word_size       = align_size_up(grow_chunks_by_words, Metaspace::commit_alignment_words());
1322  size_t preferred_word_size = align_size_up(medium_chunk_bunch,   Metaspace::commit_alignment_words());
1323  if (min_word_size >= preferred_word_size) {
1324    // Can happen when humongous chunks are allocated.
1325    preferred_word_size = min_word_size;
1326  }
1327
1328  bool expanded = expand_by(min_word_size, preferred_word_size);
1329  if (expanded) {
1330    next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
1331    assert(next != NULL, "The allocation was expected to succeed after the expansion");
1332  }
1333
1334  return next;
1335}
1336
1337void VirtualSpaceList::print_on(outputStream* st) const {
1338  VirtualSpaceListIterator iter(virtual_space_list());
1339  while (iter.repeat()) {
1340    VirtualSpaceNode* node = iter.get_next();
1341    node->print_on(st);
1342  }
1343}
1344
1345// MetaspaceGC methods
1346
1347// VM_CollectForMetadataAllocation is the vm operation used to GC.
1348// Within the VM operation after the GC the attempt to allocate the metadata
1349// should succeed.  If the GC did not free enough space for the metaspace
1350// allocation, the HWM is increased so that another virtualspace will be
1351// allocated for the metadata.  With perm gen the increase in the perm
1352// gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
1353// metaspace policy uses those as the small and large steps for the HWM.
1354//
1355// After the GC the compute_new_size() for MetaspaceGC is called to
1356// resize the capacity of the metaspaces.  The current implementation
1357// is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
1358// to resize the Java heap by some GC's.  New flags can be implemented
1359// if really needed.  MinMetaspaceFreeRatio is used to calculate how much
1360// free space is desirable in the metaspace capacity to decide how much
1361// to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
1362// free space is desirable in the metaspace capacity before decreasing
1363// the HWM.
1364
1365// Calculate the amount to increase the high water mark (HWM).
1366// Increase by a minimum amount (MinMetaspaceExpansion) so that
1367// another expansion is not requested too soon.  If that is not
1368// enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
1369// If that is still not enough, expand by the size of the allocation
1370// plus some.
1371size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
1372  size_t min_delta = MinMetaspaceExpansion;
1373  size_t max_delta = MaxMetaspaceExpansion;
1374  size_t delta = align_size_up(bytes, Metaspace::commit_alignment());
1375
1376  if (delta <= min_delta) {
1377    delta = min_delta;
1378  } else if (delta <= max_delta) {
1379    // Don't want to hit the high water mark on the next
1380    // allocation so make the delta greater than just enough
1381    // for this allocation.
1382    delta = max_delta;
1383  } else {
1384    // This allocation is large but the next ones are probably not
1385    // so increase by the minimum.
1386    delta = delta + min_delta;
1387  }
1388
1389  assert_is_size_aligned(delta, Metaspace::commit_alignment());
1390
1391  return delta;
1392}
1393
1394size_t MetaspaceGC::capacity_until_GC() {
1395  size_t value = (size_t)OrderAccess::load_ptr_acquire(&_capacity_until_GC);
1396  assert(value >= MetaspaceSize, "Not initialized properly?");
1397  return value;
1398}
1399
1400bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
1401  assert_is_size_aligned(v, Metaspace::commit_alignment());
1402
1403  size_t capacity_until_GC = (size_t) _capacity_until_GC;
1404  size_t new_value = capacity_until_GC + v;
1405
1406  if (new_value < capacity_until_GC) {
1407    // The addition wrapped around, set new_value to aligned max value.
1408    new_value = align_size_down(max_uintx, Metaspace::commit_alignment());
1409  }
1410
1411  intptr_t expected = (intptr_t) capacity_until_GC;
1412  intptr_t actual = Atomic::cmpxchg_ptr((intptr_t) new_value, &_capacity_until_GC, expected);
1413
1414  if (expected != actual) {
1415    return false;
1416  }
1417
1418  if (new_cap_until_GC != NULL) {
1419    *new_cap_until_GC = new_value;
1420  }
1421  if (old_cap_until_GC != NULL) {
1422    *old_cap_until_GC = capacity_until_GC;
1423  }
1424  return true;
1425}
1426
1427size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
1428  assert_is_size_aligned(v, Metaspace::commit_alignment());
1429
1430  return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC);
1431}
1432
1433void MetaspaceGC::initialize() {
1434  // Set the high-water mark to MaxMetaspaceSize during VM initialization since
1435  // we can't do a GC during initialization.
1436  _capacity_until_GC = MaxMetaspaceSize;
1437}
1438
1439void MetaspaceGC::post_initialize() {
1440  // Reset the high-water mark once the VM initialization is done.
1441  _capacity_until_GC = MAX2(MetaspaceAux::committed_bytes(), MetaspaceSize);
1442}
1443
1444bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
1445  // Check if the compressed class space is full.
1446  if (is_class && Metaspace::using_class_space()) {
1447    size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
1448    if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
1449      return false;
1450    }
1451  }
1452
1453  // Check if the user has imposed a limit on the metaspace memory.
1454  size_t committed_bytes = MetaspaceAux::committed_bytes();
1455  if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
1456    return false;
1457  }
1458
1459  return true;
1460}
1461
1462size_t MetaspaceGC::allowed_expansion() {
1463  size_t committed_bytes = MetaspaceAux::committed_bytes();
1464  size_t capacity_until_gc = capacity_until_GC();
1465
1466  assert(capacity_until_gc >= committed_bytes,
1467         "capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
1468         capacity_until_gc, committed_bytes);
1469
1470  size_t left_until_max  = MaxMetaspaceSize - committed_bytes;
1471  size_t left_until_GC = capacity_until_gc - committed_bytes;
1472  size_t left_to_commit = MIN2(left_until_GC, left_until_max);
1473
1474  return left_to_commit / BytesPerWord;
1475}
1476
1477void MetaspaceGC::compute_new_size() {
1478  assert(_shrink_factor <= 100, "invalid shrink factor");
1479  uint current_shrink_factor = _shrink_factor;
1480  _shrink_factor = 0;
1481
1482  // Using committed_bytes() for used_after_gc is an overestimation, since the
1483  // chunk free lists are included in committed_bytes() and the memory in an
1484  // un-fragmented chunk free list is available for future allocations.
1485  // However, if the chunk free lists become fragmented, then the memory may
1486  // not be available for future allocations and the memory is therefore "in use".
1487  // Including the chunk free lists in the definition of "in use" is therefore
1488  // necessary. Not including the chunk free lists can cause capacity_until_GC to
1489  // shrink below committed_bytes() and this has caused serious bugs in the past.
1490  const size_t used_after_gc = MetaspaceAux::committed_bytes();
1491  const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
1492
1493  const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
1494  const double maximum_used_percentage = 1.0 - minimum_free_percentage;
1495
1496  const double min_tmp = used_after_gc / maximum_used_percentage;
1497  size_t minimum_desired_capacity =
1498    (size_t)MIN2(min_tmp, double(max_uintx));
1499  // Don't shrink less than the initial generation size
1500  minimum_desired_capacity = MAX2(minimum_desired_capacity,
1501                                  MetaspaceSize);
1502
1503  log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: ");
1504  log_trace(gc, metaspace)("    minimum_free_percentage: %6.2f  maximum_used_percentage: %6.2f",
1505                           minimum_free_percentage, maximum_used_percentage);
1506  log_trace(gc, metaspace)("     used_after_gc       : %6.1fKB", used_after_gc / (double) K);
1507
1508
1509  size_t shrink_bytes = 0;
1510  if (capacity_until_GC < minimum_desired_capacity) {
1511    // The current high-water mark is below the minimum desired capacity,
1512    // so raise the HWM.
1513    size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
1514    expand_bytes = align_size_up(expand_bytes, Metaspace::commit_alignment());
1515    // Don't expand unless it's significant
1516    if (expand_bytes >= MinMetaspaceExpansion) {
1517      size_t new_capacity_until_GC = 0;
1518      bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
1519      assert(succeeded, "Should always successfully increment HWM when at safepoint");
1520
1521      Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
1522                                               new_capacity_until_GC,
1523                                               MetaspaceGCThresholdUpdater::ComputeNewSize);
1524      log_trace(gc, metaspace)("    expanding:  minimum_desired_capacity: %6.1fKB  expand_bytes: %6.1fKB  MinMetaspaceExpansion: %6.1fKB  new metaspace HWM:  %6.1fKB",
1525                               minimum_desired_capacity / (double) K,
1526                               expand_bytes / (double) K,
1527                               MinMetaspaceExpansion / (double) K,
1528                               new_capacity_until_GC / (double) K);
1529    }
1530    return;
1531  }
1532
1533  // No expansion, now see if we want to shrink
1534  // We would never want to shrink more than this
1535  assert(capacity_until_GC >= minimum_desired_capacity,
1536         SIZE_FORMAT " >= " SIZE_FORMAT,
1537         capacity_until_GC, minimum_desired_capacity);
1538  size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
1539
1540  // Should shrinking be considered?
1541  if (MaxMetaspaceFreeRatio < 100) {
1542    const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
1543    const double minimum_used_percentage = 1.0 - maximum_free_percentage;
1544    const double max_tmp = used_after_gc / minimum_used_percentage;
1545    size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
1546    maximum_desired_capacity = MAX2(maximum_desired_capacity,
1547                                    MetaspaceSize);
1548    log_trace(gc, metaspace)("    maximum_free_percentage: %6.2f  minimum_used_percentage: %6.2f",
1549                             maximum_free_percentage, minimum_used_percentage);
1550    log_trace(gc, metaspace)("    minimum_desired_capacity: %6.1fKB  maximum_desired_capacity: %6.1fKB",
1551                             minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K);
1552
1553    assert(minimum_desired_capacity <= maximum_desired_capacity,
1554           "sanity check");
1555
1556    if (capacity_until_GC > maximum_desired_capacity) {
1557      // Capacity too large, compute shrinking size
1558      shrink_bytes = capacity_until_GC - maximum_desired_capacity;
1559      // We don't want to shrink all the way back to initSize if people call
1560      // System.gc(), because some programs do that between "phases" and then
1561      // we'd just have to grow the heap up again for the next phase.  So we
1562      // damp the shrinking: 0% on the first call, 10% on the second call, 40%
1563      // on the third call, and 100% by the fourth call.  But if we recompute
1564      // size without shrinking, it goes back to 0%.
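      // For example (illustrative numbers): on the third consecutive
      // recomputation that wants to shrink, current_shrink_factor is 40, so a
      // 20M excess is damped to roughly 8M of actual shrinkage (before the
      // commit-alignment rounding below).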
1565      shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
1566
1567      shrink_bytes = align_size_down(shrink_bytes, Metaspace::commit_alignment());
1568
1569      assert(shrink_bytes <= max_shrink_bytes,
1570             "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
1571             shrink_bytes, max_shrink_bytes);
1572      if (current_shrink_factor == 0) {
1573        _shrink_factor = 10;
1574      } else {
1575        _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
1576      }
1577      log_trace(gc, metaspace)("    shrinking:  initSize: %.1fK  maximum_desired_capacity: %.1fK",
1578                               MetaspaceSize / (double) K, maximum_desired_capacity / (double) K);
1579      log_trace(gc, metaspace)("    shrink_bytes: %.1fK  current_shrink_factor: %d  new shrink factor: %d  MinMetaspaceExpansion: %.1fK",
1580                               shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, MinMetaspaceExpansion / (double) K);
1581    }
1582  }
1583
1584  // Don't shrink unless it's significant
1585  if (shrink_bytes >= MinMetaspaceExpansion &&
1586      ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
1587    size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
1588    Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
1589                                             new_capacity_until_GC,
1590                                             MetaspaceGCThresholdUpdater::ComputeNewSize);
1591  }
1592}
1593
1594// Metadebug methods
1595
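// The countdown below is drawn pseudo-randomly from roughly
// [1, MetadataAllocationFailALotInterval]: os::random() divided by
// (max_jint + 1.0) is scaled by the interval and incremented by one, so a
// simulated allocation failure is injected after that many allocations
// (see test_metadata_failure()).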
1596void Metadebug::init_allocation_fail_alot_count() {
1597  if (MetadataAllocationFailALot) {
1598    _allocation_fail_alot_count =
1599      1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0));
1600  }
1601}
1602
1603#ifdef ASSERT
1604bool Metadebug::test_metadata_failure() {
1605  if (MetadataAllocationFailALot &&
1606      Threads::is_vm_complete()) {
1607    if (_allocation_fail_alot_count > 0) {
1608      _allocation_fail_alot_count--;
1609    } else {
1610      log_trace(gc, metaspace, freelist)("Metadata allocation failing for MetadataAllocationFailALot");
1611      init_allocation_fail_alot_count();
1612      return true;
1613    }
1614  }
1615  return false;
1616}
1617#endif
1618
1619// ChunkManager methods
1620
1621size_t ChunkManager::free_chunks_total_words() {
1622  return _free_chunks_total;
1623}
1624
1625size_t ChunkManager::free_chunks_total_bytes() {
1626  return free_chunks_total_words() * BytesPerWord;
1627}
1628
1629size_t ChunkManager::free_chunks_count() {
1630#ifdef ASSERT
1631  if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
1632    MutexLockerEx cl(SpaceManager::expand_lock(),
1633                     Mutex::_no_safepoint_check_flag);
1634    // This lock is only needed in debug because the verification
1635    // of the _free_chunks_totals walks the list of free chunks
1636    slow_locked_verify_free_chunks_count();
1637  }
1638#endif
1639  return _free_chunks_count;
1640}
1641
1642void ChunkManager::locked_verify_free_chunks_total() {
1643  assert_lock_strong(SpaceManager::expand_lock());
1644  assert(sum_free_chunks() == _free_chunks_total,
1645         "_free_chunks_total " SIZE_FORMAT " is not the"
1646         " same as sum " SIZE_FORMAT, _free_chunks_total,
1647         sum_free_chunks());
1648}
1649
1650void ChunkManager::verify_free_chunks_total() {
1651  MutexLockerEx cl(SpaceManager::expand_lock(),
1652                     Mutex::_no_safepoint_check_flag);
1653  locked_verify_free_chunks_total();
1654}
1655
1656void ChunkManager::locked_verify_free_chunks_count() {
1657  assert_lock_strong(SpaceManager::expand_lock());
1658  assert(sum_free_chunks_count() == _free_chunks_count,
1659         "_free_chunks_count " SIZE_FORMAT " is not the"
1660         " same as sum " SIZE_FORMAT, _free_chunks_count,
1661         sum_free_chunks_count());
1662}
1663
1664void ChunkManager::verify_free_chunks_count() {
1665#ifdef ASSERT
1666  MutexLockerEx cl(SpaceManager::expand_lock(),
1667                     Mutex::_no_safepoint_check_flag);
1668  locked_verify_free_chunks_count();
1669#endif
1670}
1671
1672void ChunkManager::verify() {
1673  MutexLockerEx cl(SpaceManager::expand_lock(),
1674                     Mutex::_no_safepoint_check_flag);
1675  locked_verify();
1676}
1677
1678void ChunkManager::locked_verify() {
1679  locked_verify_free_chunks_count();
1680  locked_verify_free_chunks_total();
1681}
1682
1683void ChunkManager::locked_print_free_chunks(outputStream* st) {
1684  assert_lock_strong(SpaceManager::expand_lock());
1685  st->print_cr("Free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
1686                _free_chunks_total, _free_chunks_count);
1687}
1688
1689void ChunkManager::locked_print_sum_free_chunks(outputStream* st) {
1690  assert_lock_strong(SpaceManager::expand_lock());
1691  st->print_cr("Sum free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
1692                sum_free_chunks(), sum_free_chunks_count());
1693}
1694ChunkList* ChunkManager::free_chunks(ChunkIndex index) {
1695  return &_free_chunks[index];
1696}
1697
1698// These methods, which sum over the free chunk lists, are used by printing
1699// methods that run in product builds.
1700size_t ChunkManager::sum_free_chunks() {
1701  assert_lock_strong(SpaceManager::expand_lock());
1702  size_t result = 0;
1703  for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1704    ChunkList* list = free_chunks(i);
1705
1706    if (list == NULL) {
1707      continue;
1708    }
1709
1710    result = result + list->count() * list->size();
1711  }
1712  result = result + humongous_dictionary()->total_size();
1713  return result;
1714}
1715
1716size_t ChunkManager::sum_free_chunks_count() {
1717  assert_lock_strong(SpaceManager::expand_lock());
1718  size_t count = 0;
1719  for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1720    ChunkList* list = free_chunks(i);
1721    if (list == NULL) {
1722      continue;
1723    }
1724    count = count + list->count();
1725  }
1726  count = count + humongous_dictionary()->total_free_blocks();
1727  return count;
1728}
1729
1730ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
1731  ChunkIndex index = list_index(word_size);
1732  assert(index < HumongousIndex, "No humongous list");
1733  return free_chunks(index);
1734}
1735
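// Removes and returns a free chunk of the requested size class, taking the
// head of the matching free list for non-humongous sizes and a chunk of at
// least the requested size ("atLeast") from the humongous dictionary
// otherwise.  Returns NULL if nothing suitable is free; callers then fall
// back to carving a new chunk out of the current virtual space.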
1736Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
1737  assert_lock_strong(SpaceManager::expand_lock());
1738
1739  slow_locked_verify();
1740
1741  Metachunk* chunk = NULL;
1742  if (list_index(word_size) != HumongousIndex) {
1743    ChunkList* free_list = find_free_chunks_list(word_size);
1744    assert(free_list != NULL, "Sanity check");
1745
1746    chunk = free_list->head();
1747
1748    if (chunk == NULL) {
1749      return NULL;
1750    }
1751
1752    // Remove the chunk as the head of the list.
1753    free_list->remove_chunk(chunk);
1754
1755    log_trace(gc, metaspace, freelist)("ChunkManager::free_chunks_get: free_list " PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
1756                                       p2i(free_list), p2i(chunk), chunk->word_size());
1757  } else {
1758    chunk = humongous_dictionary()->get_chunk(
1759      word_size,
1760      FreeBlockDictionary<Metachunk>::atLeast);
1761
1762    if (chunk == NULL) {
1763      return NULL;
1764    }
1765
1766    log_debug(gc, metaspace, alloc)("Free list allocate humongous chunk size " SIZE_FORMAT " for requested size " SIZE_FORMAT " waste " SIZE_FORMAT,
1767                                    chunk->word_size(), word_size, chunk->word_size() - word_size);
1768  }
1769
1770  // Chunk is being removed from the chunks free list.
1771  dec_free_chunks_total(chunk->word_size());
1772
1773  // Remove it from the links to this freelist
1774  chunk->set_next(NULL);
1775  chunk->set_prev(NULL);
1776#ifdef ASSERT
1777  // Chunk is no longer on any freelist. Setting it to false makes container_count_slow()
1778  // work.
1779  chunk->set_is_tagged_free(false);
1780#endif
1781  chunk->container()->inc_container_count();
1782
1783  slow_locked_verify();
1784  return chunk;
1785}
1786
1787Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
1788  assert_lock_strong(SpaceManager::expand_lock());
1789  slow_locked_verify();
1790
1791  // Take from the beginning of the list
1792  Metachunk* chunk = free_chunks_get(word_size);
1793  if (chunk == NULL) {
1794    return NULL;
1795  }
1796
1797  assert((word_size <= chunk->word_size()) ||
1798         (list_index(chunk->word_size()) == HumongousIndex),
1799         "Non-humongous variable sized chunk");
1800  LogHandle(gc, metaspace, freelist) log;
1801  if (log.is_debug()) {
1802    size_t list_count;
1803    if (list_index(word_size) < HumongousIndex) {
1804      ChunkList* list = find_free_chunks_list(word_size);
1805      list_count = list->count();
1806    } else {
1807      list_count = humongous_dictionary()->total_count();
1808    }
1809    log.debug("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk " PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",
1810               p2i(this), p2i(chunk), chunk->word_size(), list_count);
1811    ResourceMark rm;
1812    locked_print_free_chunks(log.debug_stream());
1813  }
1814
1815  return chunk;
1816}
1817
1818void ChunkManager::print_on(outputStream* out) const {
1819  const_cast<ChunkManager *>(this)->humongous_dictionary()->report_statistics(out);
1820}
1821
1822// SpaceManager methods
1823
1824void SpaceManager::get_initial_chunk_sizes(Metaspace::MetaspaceType type,
1825                                           size_t* chunk_word_size,
1826                                           size_t* class_chunk_word_size) {
1827  switch (type) {
1828  case Metaspace::BootMetaspaceType:
1829    *chunk_word_size = Metaspace::first_chunk_word_size();
1830    *class_chunk_word_size = Metaspace::first_class_chunk_word_size();
1831    break;
1832  case Metaspace::ROMetaspaceType:
1833    *chunk_word_size = SharedReadOnlySize / wordSize;
1834    *class_chunk_word_size = ClassSpecializedChunk;
1835    break;
1836  case Metaspace::ReadWriteMetaspaceType:
1837    *chunk_word_size = SharedReadWriteSize / wordSize;
1838    *class_chunk_word_size = ClassSpecializedChunk;
1839    break;
1840  case Metaspace::AnonymousMetaspaceType:
1841  case Metaspace::ReflectionMetaspaceType:
1842    *chunk_word_size = SpecializedChunk;
1843    *class_chunk_word_size = ClassSpecializedChunk;
1844    break;
1845  default:
1846    *chunk_word_size = SmallChunk;
1847    *class_chunk_word_size = ClassSmallChunk;
1848    break;
1849  }
1850  assert(*chunk_word_size != 0 && *class_chunk_word_size != 0,
1851         "Initial chunk sizes bad: data  " SIZE_FORMAT
1852         " class " SIZE_FORMAT,
1853         *chunk_word_size, *class_chunk_word_size);
1854}
1855
1856size_t SpaceManager::sum_free_in_chunks_in_use() const {
1857  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1858  size_t free = 0;
1859  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1860    Metachunk* chunk = chunks_in_use(i);
1861    while (chunk != NULL) {
1862      free += chunk->free_word_size();
1863      chunk = chunk->next();
1864    }
1865  }
1866  return free;
1867}
1868
1869size_t SpaceManager::sum_waste_in_chunks_in_use() const {
1870  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1871  size_t result = 0;
1872  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1873   result += sum_waste_in_chunks_in_use(i);
1874  }
1875
1876  return result;
1877}
1878
1879size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
1880  size_t result = 0;
1881  Metachunk* chunk = chunks_in_use(index);
1882  // Count the free space in all the chunks but not the
1883  // current chunk from which allocations are still being done.
1884  while (chunk != NULL) {
1885    if (chunk != current_chunk()) {
1886      result += chunk->free_word_size();
1887    }
1888    chunk = chunk->next();
1889  }
1890  return result;
1891}
1892
1893size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
1894  // For CMS use "allocated_chunks_words()" which does not need the
1895  // Metaspace lock.  For the other collectors sum over the
1896  // lists.  Use both methods as a check that "allocated_chunks_words()"
1897  // is correct.  That is, sum_capacity_in_chunks_in_use() is too expensive
1898  // to use in the product, so allocated_chunks_words() should be used,
1899  // but allow for checking that allocated_chunks_words() returns the same
1900  // value as sum_capacity_in_chunks_in_use(), which is the definitive
1901  // answer.
1902  if (UseConcMarkSweepGC) {
1903    return allocated_chunks_words();
1904  } else {
1905    MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1906    size_t sum = 0;
1907    for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1908      Metachunk* chunk = chunks_in_use(i);
1909      while (chunk != NULL) {
1910        sum += chunk->word_size();
1911        chunk = chunk->next();
1912      }
1913    }
1914    return sum;
1915  }
1916}
1917
1918size_t SpaceManager::sum_count_in_chunks_in_use() {
1919  size_t count = 0;
1920  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1921    count = count + sum_count_in_chunks_in_use(i);
1922  }
1923
1924  return count;
1925}
1926
1927size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
1928  size_t count = 0;
1929  Metachunk* chunk = chunks_in_use(i);
1930  while (chunk != NULL) {
1931    count++;
1932    chunk = chunk->next();
1933  }
1934  return count;
1935}
1936
1937
1938size_t SpaceManager::sum_used_in_chunks_in_use() const {
1939  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1940  size_t used = 0;
1941  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1942    Metachunk* chunk = chunks_in_use(i);
1943    while (chunk != NULL) {
1944      used += chunk->used_word_size();
1945      chunk = chunk->next();
1946    }
1947  }
1948  return used;
1949}
1950
1951void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
1952
1953  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1954    Metachunk* chunk = chunks_in_use(i);
1955    st->print("SpaceManager: %s " PTR_FORMAT,
1956                 chunk_size_name(i), p2i(chunk));
1957    if (chunk != NULL) {
1958      st->print_cr(" free " SIZE_FORMAT,
1959                   chunk->free_word_size());
1960    } else {
1961      st->cr();
1962    }
1963  }
1964
1965  chunk_manager()->locked_print_free_chunks(st);
1966  chunk_manager()->locked_print_sum_free_chunks(st);
1967}
1968
1969size_t SpaceManager::calc_chunk_size(size_t word_size) {
1970
1971  // Decide between a small chunk and a medium chunk.  Up to
1972  // _small_chunk_limit small chunks can be allocated.
1973  // After that a medium chunk is preferred.
1974  size_t chunk_word_size;
1975  if (chunks_in_use(MediumIndex) == NULL &&
1976      sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) {
1977    chunk_word_size = (size_t) small_chunk_size();
1978    if (word_size + Metachunk::overhead() > small_chunk_size()) {
1979      chunk_word_size = medium_chunk_size();
1980    }
1981  } else {
1982    chunk_word_size = medium_chunk_size();
1983  }
1984
1985  // Might still need a humongous chunk.  Enforce
1986  // humongous allocations sizes to be aligned up to
1987  // the smallest chunk size.
1988  size_t if_humongous_sized_chunk =
1989    align_size_up(word_size + Metachunk::overhead(),
1990                  smallest_chunk_size());
1991  chunk_word_size =
1992    MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
1993
1994  assert(!SpaceManager::is_humongous(word_size) ||
1995         chunk_word_size == if_humongous_sized_chunk,
1996         "Size calculation is wrong, word_size " SIZE_FORMAT
1997         " chunk_word_size " SIZE_FORMAT,
1998         word_size, chunk_word_size);
1999  LogHandle(gc, metaspace, alloc) log;
2000  if (log.is_debug() && SpaceManager::is_humongous(word_size)) {
2001    log.debug("Metadata humongous allocation:");
2002    log.debug("  word_size " PTR_FORMAT, word_size);
2003    log.debug("  chunk_word_size " PTR_FORMAT, chunk_word_size);
2004    log.debug("    chunk overhead " PTR_FORMAT, Metachunk::overhead());
2005  }
2006  return chunk_word_size;
2007}
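// In short: the first _small_chunk_limit small-chunk allocations are satisfied
// with small chunks (provided the request fits one), later ones with medium
// chunks, and a request too large for the chosen chunk gets a humongous chunk
// sized to the request plus the Metachunk overhead, rounded up to a multiple
// of smallest_chunk_size().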
2008
2009void SpaceManager::track_metaspace_memory_usage() {
2010  if (is_init_completed()) {
2011    if (is_class()) {
2012      MemoryService::track_compressed_class_memory_usage();
2013    }
2014    MemoryService::track_metaspace_memory_usage();
2015  }
2016}
2017
2018MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
2019  assert(vs_list()->current_virtual_space() != NULL,
2020         "Should have been set");
2021  assert(current_chunk() == NULL ||
2022         current_chunk()->allocate(word_size) == NULL,
2023         "Don't need to expand");
2024  MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
2025
2026  if (log_is_enabled(Trace, gc, metaspace, freelist)) {
2027    size_t words_left = 0;
2028    size_t words_used = 0;
2029    if (current_chunk() != NULL) {
2030      words_left = current_chunk()->free_word_size();
2031      words_used = current_chunk()->used_word_size();
2032    }
2033    log_trace(gc, metaspace, freelist)("SpaceManager::grow_and_allocate for " SIZE_FORMAT " words " SIZE_FORMAT " words used " SIZE_FORMAT " words left",
2034                                       word_size, words_used, words_left);
2035  }
2036
2037  // Get another chunk
2038  size_t grow_chunks_by_words = calc_chunk_size(word_size);
2039  Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words);
2040
2041  MetaWord* mem = NULL;
2042
2043  // If a chunk was available, add it to the in-use chunk list
2044  // and do an allocation from it.
2045  if (next != NULL) {
2046    // Add to this manager's list of chunks in use.
2047    add_chunk(next, false);
2048    mem = next->allocate(word_size);
2049  }
2050
2051  // Track metaspace memory usage statistic.
2052  track_metaspace_memory_usage();
2053
2054  return mem;
2055}
2056
2057void SpaceManager::print_on(outputStream* st) const {
2058
2059  for (ChunkIndex i = ZeroIndex;
2060       i < NumberOfInUseLists ;
2061       i = next_chunk_index(i) ) {
2062    st->print_cr("  chunks_in_use " PTR_FORMAT " chunk size " SIZE_FORMAT,
2063                 p2i(chunks_in_use(i)),
2064                 chunks_in_use(i) == NULL ? 0 : chunks_in_use(i)->word_size());
2065  }
2066  st->print_cr("    waste:  Small " SIZE_FORMAT " Medium " SIZE_FORMAT
2067               " Humongous " SIZE_FORMAT,
2068               sum_waste_in_chunks_in_use(SmallIndex),
2069               sum_waste_in_chunks_in_use(MediumIndex),
2070               sum_waste_in_chunks_in_use(HumongousIndex));
2071  // block free lists
2072  if (block_freelists() != NULL) {
2073    st->print_cr("total in block free lists " SIZE_FORMAT,
2074      block_freelists()->total_size());
2075  }
2076}
2077
2078SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
2079                           Mutex* lock) :
2080  _mdtype(mdtype),
2081  _allocated_blocks_words(0),
2082  _allocated_chunks_words(0),
2083  _allocated_chunks_count(0),
2084  _lock(lock)
2085{
2086  initialize();
2087}
2088
2089void SpaceManager::inc_size_metrics(size_t words) {
2090  assert_lock_strong(SpaceManager::expand_lock());
2091  // Total of allocated Metachunks and allocated Metachunks count
2092  // for each SpaceManager
2093  _allocated_chunks_words = _allocated_chunks_words + words;
2094  _allocated_chunks_count++;
2095  // Global total of capacity in allocated Metachunks
2096  MetaspaceAux::inc_capacity(mdtype(), words);
2097  // Global total of allocated Metablocks.
2098  // used_words_slow() includes the overhead in each
2099  // Metachunk so include it in the used when the
2100  // Metachunk is first added (so only added once per
2101  // Metachunk).
2102  MetaspaceAux::inc_used(mdtype(), Metachunk::overhead());
2103}
2104
2105void SpaceManager::inc_used_metrics(size_t words) {
2106  // Add to the per SpaceManager total
2107  Atomic::add_ptr(words, &_allocated_blocks_words);
2108  // Add to the global total
2109  MetaspaceAux::inc_used(mdtype(), words);
2110}
2111
2112void SpaceManager::dec_total_from_size_metrics() {
2113  MetaspaceAux::dec_capacity(mdtype(), allocated_chunks_words());
2114  MetaspaceAux::dec_used(mdtype(), allocated_blocks_words());
2115  // Also deduct the overhead per Metachunk
2116  MetaspaceAux::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead());
2117}
2118
2119void SpaceManager::initialize() {
2120  Metadebug::init_allocation_fail_alot_count();
2121  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2122    _chunks_in_use[i] = NULL;
2123  }
2124  _current_chunk = NULL;
2125  log_trace(gc, metaspace, freelist)("SpaceManager(): " PTR_FORMAT, p2i(this));
2126}
2127
2128void ChunkManager::return_chunks(ChunkIndex index, Metachunk* chunks) {
2129  if (chunks == NULL) {
2130    return;
2131  }
2132  ChunkList* list = free_chunks(index);
2133  assert(list->size() == chunks->word_size(), "Mismatch in chunk sizes");
2134  assert_lock_strong(SpaceManager::expand_lock());
2135  Metachunk* cur = chunks;
2136
2137  // This returns chunks one at a time.  If a new
2138  // class List can be created that is a base class
2139  // of FreeList then something like FreeList::prepend()
2140  // can be used in place of this loop
2141  while (cur != NULL) {
2142    assert(cur->container() != NULL, "Container should have been set");
2143    cur->container()->dec_container_count();
2144    // Capture the next link before it is changed
2145    // by the call to return_chunk_at_head();
2146    Metachunk* next = cur->next();
2147    DEBUG_ONLY(cur->set_is_tagged_free(true);)
2148    list->return_chunk_at_head(cur);
2149    cur = next;
2150  }
2151}
2152
2153SpaceManager::~SpaceManager() {
2154  // This call takes this->_lock, which can't be done while holding expand_lock()
2155  assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
2156         "sum_capacity_in_chunks_in_use() " SIZE_FORMAT
2157         " allocated_chunks_words() " SIZE_FORMAT,
2158         sum_capacity_in_chunks_in_use(), allocated_chunks_words());
2159
2160  MutexLockerEx fcl(SpaceManager::expand_lock(),
2161                    Mutex::_no_safepoint_check_flag);
2162
2163  chunk_manager()->slow_locked_verify();
2164
2165  dec_total_from_size_metrics();
2166
2167  LogHandle(gc, metaspace, freelist) log;
2168  if (log.is_trace()) {
2169    log.trace("~SpaceManager(): " PTR_FORMAT, p2i(this));
2170    ResourceMark rm;
2171    locked_print_chunks_in_use_on(log.trace_stream());
2172  }
2173
2174  // Do not mangle freed Metachunks.  The chunk size inside Metachunks
2175  // is still needed during the freeing of a VirtualSpaceNode.
2176
2177  // Have to update before the chunks_in_use lists are emptied
2178  // below.
2179  chunk_manager()->inc_free_chunks_total(allocated_chunks_words(),
2180                                         sum_count_in_chunks_in_use());
2181
2182  // Add all the chunks in use by this space manager
2183  // to the global list of free chunks.
2184
2185  // Follow each list of chunks-in-use and add them to the
2186  // free lists.  Each list is NULL terminated.
2187
2188  for (ChunkIndex i = ZeroIndex; i < HumongousIndex; i = next_chunk_index(i)) {
2189    log.trace("returned " SIZE_FORMAT " %s chunks to freelist", sum_count_in_chunks_in_use(i), chunk_size_name(i));
2190    Metachunk* chunks = chunks_in_use(i);
2191    chunk_manager()->return_chunks(i, chunks);
2192    set_chunks_in_use(i, NULL);
2193    log.trace("updated freelist count " SSIZE_FORMAT " %s", chunk_manager()->free_chunks(i)->count(), chunk_size_name(i));
2194    assert(i != HumongousIndex, "Humongous chunks are handled explicitly later");
2195  }
2196
2197  // The medium chunk case may be optimized by passing the head and
2198  // tail of the medium chunk list to add_at_head().  The tail is often
2199  // the current chunk but there are probably exceptions.
2200
2201  // Humongous chunks
2202  log.trace("returned " SIZE_FORMAT " %s humongous chunks to dictionary",
2203            sum_count_in_chunks_in_use(HumongousIndex), chunk_size_name(HumongousIndex));
2204  log.trace("Humongous chunk dictionary: ");
2205  // Humongous chunks are never the current chunk.
2206  Metachunk* humongous_chunks = chunks_in_use(HumongousIndex);
2207
2208  while (humongous_chunks != NULL) {
2209#ifdef ASSERT
2210    humongous_chunks->set_is_tagged_free(true);
2211#endif
2212    log.trace(PTR_FORMAT " (" SIZE_FORMAT ") ", p2i(humongous_chunks), humongous_chunks->word_size());
2213    assert(humongous_chunks->word_size() == (size_t)
2214           align_size_up(humongous_chunks->word_size(),
2215                             smallest_chunk_size()),
2216           "Humongous chunk size is wrong: word size " SIZE_FORMAT
2217           " granularity " SIZE_FORMAT,
2218           humongous_chunks->word_size(), smallest_chunk_size());
2219    Metachunk* next_humongous_chunks = humongous_chunks->next();
2220    humongous_chunks->container()->dec_container_count();
2221    chunk_manager()->humongous_dictionary()->return_chunk(humongous_chunks);
2222    humongous_chunks = next_humongous_chunks;
2223  }
2224  log.trace("updated dictionary count " SIZE_FORMAT " %s", chunk_manager()->humongous_dictionary()->total_count(), chunk_size_name(HumongousIndex));
2225  chunk_manager()->slow_locked_verify();
2226}
2227
2228const char* SpaceManager::chunk_size_name(ChunkIndex index) const {
2229  switch (index) {
2230    case SpecializedIndex:
2231      return "Specialized";
2232    case SmallIndex:
2233      return "Small";
2234    case MediumIndex:
2235      return "Medium";
2236    case HumongousIndex:
2237      return "Humongous";
2238    default:
2239      return NULL;
2240  }
2241}
2242
2243ChunkIndex ChunkManager::list_index(size_t size) {
2244  switch (size) {
2245    case SpecializedChunk:
2246      assert(SpecializedChunk == ClassSpecializedChunk,
2247             "Need branch for ClassSpecializedChunk");
2248      return SpecializedIndex;
2249    case SmallChunk:
2250    case ClassSmallChunk:
2251      return SmallIndex;
2252    case MediumChunk:
2253    case ClassMediumChunk:
2254      return MediumIndex;
2255    default:
2256      assert(size > MediumChunk || size > ClassMediumChunk,
2257             "Not a humongous chunk");
2258      return HumongousIndex;
2259  }
2260}
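// For example, a chunk of exactly SmallChunk or ClassSmallChunk words maps to
// SmallIndex, while any size that is not one of the fixed chunk sizes falls
// through to HumongousIndex.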
2261
2262void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
2263  assert_lock_strong(_lock);
2264  size_t raw_word_size = get_raw_word_size(word_size);
2265  size_t min_size = TreeChunk<Metablock, FreeList<Metablock> >::min_size();
2266  assert(raw_word_size >= min_size,
2267         "Should not deallocate dark matter " SIZE_FORMAT "<" SIZE_FORMAT, word_size, min_size);
2268  block_freelists()->return_block(p, raw_word_size);
2269}
2270
2271// Adds a chunk to the list of chunks in use.
2272void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
2273
2274  assert(new_chunk != NULL, "Should not be NULL");
2275  assert(new_chunk->next() == NULL, "Should not be on a list");
2276
2277  new_chunk->reset_empty();
2278
2279  // Find the correct list and set the current
2280  // chunk for that list.
2281  ChunkIndex index = ChunkManager::list_index(new_chunk->word_size());
2282
2283  if (index != HumongousIndex) {
2284    retire_current_chunk();
2285    set_current_chunk(new_chunk);
2286    new_chunk->set_next(chunks_in_use(index));
2287    set_chunks_in_use(index, new_chunk);
2288  } else {
2289    // For null class loader data and DumpSharedSpaces, the first chunk isn't
2290    // small, so small will be null.  Link this first chunk as the current
2291    // chunk.
2292    if (make_current) {
2293      // Set as the current chunk but otherwise treat as a humongous chunk.
2294      set_current_chunk(new_chunk);
2295    }
2296    // Link at head.  The _current_chunk only points to a humongous chunk for
2297    // the null class loader metaspace (class and data virtual space managers),
2298    // so it will not point to the tail
2299    // of the humongous chunks list.
2300    new_chunk->set_next(chunks_in_use(HumongousIndex));
2301    set_chunks_in_use(HumongousIndex, new_chunk);
2302
2303    assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
2304  }
2305
2306  // Add to the running sum of capacity
2307  inc_size_metrics(new_chunk->word_size());
2308
2309  assert(new_chunk->is_empty(), "Not ready for reuse");
2310  LogHandle(gc, metaspace, freelist) log;
2311  if (log.is_trace()) {
2312    log.trace("SpaceManager::add_chunk: " SIZE_FORMAT ") ", sum_count_in_chunks_in_use());
2313    ResourceMark rm;
2314    outputStream* out = log.trace_stream();
2315    new_chunk->print_on(out);
2316    chunk_manager()->locked_print_free_chunks(out);
2317  }
2318}
2319
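// Before a new chunk becomes current, the space remaining in the old current
// chunk is carved off and handed to the block free list (provided it is at
// least the minimum dictionary block size), so it can still serve later
// allocations instead of being wasted.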
2320void SpaceManager::retire_current_chunk() {
2321  if (current_chunk() != NULL) {
2322    size_t remaining_words = current_chunk()->free_word_size();
2323    if (remaining_words >= TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
2324      block_freelists()->return_block(current_chunk()->allocate(remaining_words), remaining_words);
2325      inc_used_metrics(remaining_words);
2326    }
2327  }
2328}
2329
2330Metachunk* SpaceManager::get_new_chunk(size_t word_size,
2331                                       size_t grow_chunks_by_words) {
2332  // Get a chunk from the chunk freelist
2333  Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);
2334
2335  if (next == NULL) {
2336    next = vs_list()->get_new_chunk(word_size,
2337                                    grow_chunks_by_words,
2338                                    medium_chunk_bunch());
2339  }
2340
2341  LogHandle(gc, metaspace, alloc) log;
2342  if (log.is_debug() && next != NULL &&
2343      SpaceManager::is_humongous(next->word_size())) {
2344    log.debug("  new humongous chunk word size " PTR_FORMAT, next->word_size());
2345  }
2346
2347  return next;
2348}
2349
2350/*
2351 * The policy is to allocate up to _small_chunk_limit small chunks
2352 * after which only medium chunks are allocated.  This is done to
2353 * reduce fragmentation.  In some cases, this can result in a lot
2354 * of small chunks being allocated to the point where it's not
2355 * possible to expand.  If this happens, there may be no medium chunks
2356 * available and OOME would be thrown.  Instead of doing that,
2357 * if the allocation request size fits in a small chunk, an attempt
2358 * will be made to allocate a small chunk.
2359 */
2360MetaWord* SpaceManager::get_small_chunk_and_allocate(size_t word_size) {
2361  size_t raw_word_size = get_raw_word_size(word_size);
2362
2363  if (raw_word_size + Metachunk::overhead() > small_chunk_size()) {
2364    return NULL;
2365  }
2366
2367  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2368  MutexLockerEx cl1(expand_lock(), Mutex::_no_safepoint_check_flag);
2369
2370  Metachunk* chunk = chunk_manager()->chunk_freelist_allocate(small_chunk_size());
2371
2372  MetaWord* mem = NULL;
2373
2374  if (chunk != NULL) {
2375    // Add chunk to the in-use chunk list and do an allocation from it.
2376    // Add to this manager's list of chunks in use.
2377    add_chunk(chunk, false);
2378    mem = chunk->allocate(raw_word_size);
2379
2380    inc_used_metrics(raw_word_size);
2381
2382    // Track metaspace memory usage statistic.
2383    track_metaspace_memory_usage();
2384  }
2385
2386  return mem;
2387}
2388
2389MetaWord* SpaceManager::allocate(size_t word_size) {
2390  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2391
2392  size_t raw_word_size = get_raw_word_size(word_size);
2393  BlockFreelist* fl =  block_freelists();
2394  MetaWord* p = NULL;
2395  // Allocation from the dictionary is expensive in the sense that
2396  // the dictionary has to be searched for a size.  Don't allocate
2397  // from the dictionary until it starts to get fat.  Is this
2398  // a reasonable policy?  Maybe a skinny dictionary is fast enough
2399  // for allocations.  Do some profiling.  JJJ
2400  if (fl->total_size() > allocation_from_dictionary_limit) {
2401    p = fl->get_block(raw_word_size);
2402  }
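  // If the dictionary was skipped (still below allocation_from_dictionary_limit)
  // or had no block of a suitable size, fall through to allocate_work(), which
  // allocates from the current chunk or grows the space manager.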
2403  if (p == NULL) {
2404    p = allocate_work(raw_word_size);
2405  }
2406
2407  return p;
2408}
2409
2410// Returns the address of space allocated for "word_size".
2411// This method does not know about blocks (Metablocks)
2412MetaWord* SpaceManager::allocate_work(size_t word_size) {
2413  assert_lock_strong(_lock);
2414#ifdef ASSERT
2415  if (Metadebug::test_metadata_failure()) {
2416    return NULL;
2417  }
2418#endif
2419  // Is there space in the current chunk?
2420  MetaWord* result = NULL;
2421
2422  // For DumpSharedSpaces, only allocate out of the current chunk which is
2423  // never null because we gave it the size we wanted.  Caller reports out
2424  // of memory if this returns null.
2425  if (DumpSharedSpaces) {
2426    assert(current_chunk() != NULL, "should never happen");
2427    inc_used_metrics(word_size);
2428    return current_chunk()->allocate(word_size); // caller handles null result
2429  }
2430
2431  if (current_chunk() != NULL) {
2432    result = current_chunk()->allocate(word_size);
2433  }
2434
2435  if (result == NULL) {
2436    result = grow_and_allocate(word_size);
2437  }
2438
2439  if (result != NULL) {
2440    inc_used_metrics(word_size);
2441    assert(result != (MetaWord*) chunks_in_use(MediumIndex),
2442           "Head of the list is being allocated");
2443  }
2444
2445  return result;
2446}
2447
2448void SpaceManager::verify() {
2449  // If there are blocks in the dictionary, then
2450  // verification of chunks does not work since
2451  // being in the dictionary alters a chunk.
2452  if (block_freelists()->total_size() == 0) {
2453    for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2454      Metachunk* curr = chunks_in_use(i);
2455      while (curr != NULL) {
2456        curr->verify();
2457        verify_chunk_size(curr);
2458        curr = curr->next();
2459      }
2460    }
2461  }
2462}
2463
2464void SpaceManager::verify_chunk_size(Metachunk* chunk) {
2465  assert(is_humongous(chunk->word_size()) ||
2466         chunk->word_size() == medium_chunk_size() ||
2467         chunk->word_size() == small_chunk_size() ||
2468         chunk->word_size() == specialized_chunk_size(),
2469         "Chunk size is wrong");
2470  return;
2471}
2472
2473#ifdef ASSERT
2474void SpaceManager::verify_allocated_blocks_words() {
2475  // Verification is only guaranteed at a safepoint.
2476  assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(),
2477    "Verification can fail if the application is running");
2478  assert(allocated_blocks_words() == sum_used_in_chunks_in_use(),
2479         "allocation total is not consistent " SIZE_FORMAT
2480         " vs " SIZE_FORMAT,
2481         allocated_blocks_words(), sum_used_in_chunks_in_use());
2482}
2483
2484#endif
2485
2486void SpaceManager::dump(outputStream* const out) const {
2487  size_t curr_total = 0;
2488  size_t waste = 0;
2489  uint i = 0;
2490  size_t used = 0;
2491  size_t capacity = 0;
2492
2493  // Add up statistics for all chunks in this SpaceManager.
2494  for (ChunkIndex index = ZeroIndex;
2495       index < NumberOfInUseLists;
2496       index = next_chunk_index(index)) {
2497    for (Metachunk* curr = chunks_in_use(index);
2498         curr != NULL;
2499         curr = curr->next()) {
2500      out->print("%d) ", i++);
2501      curr->print_on(out);
2502      curr_total += curr->word_size();
2503      used += curr->used_word_size();
2504      capacity += curr->word_size();
2505      waste += curr->free_word_size() + curr->overhead();
2506    }
2507  }
2508
2509  if (log_is_enabled(Trace, gc, metaspace, freelist)) {
2510    block_freelists()->print_on(out);
2511  }
2512
2513  size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
2514  // Free space isn't wasted.
2515  waste -= free;
2516
2517  out->print_cr("total of all chunks "  SIZE_FORMAT " used " SIZE_FORMAT
2518                " free " SIZE_FORMAT " capacity " SIZE_FORMAT
2519                " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste);
2520}
2521
2522#ifndef PRODUCT
2523void SpaceManager::mangle_freed_chunks() {
2524  for (ChunkIndex index = ZeroIndex;
2525       index < NumberOfInUseLists;
2526       index = next_chunk_index(index)) {
2527    for (Metachunk* curr = chunks_in_use(index);
2528         curr != NULL;
2529         curr = curr->next()) {
2530      curr->mangle();
2531    }
2532  }
2533}
2534#endif // PRODUCT
2535
2536// MetaspaceAux
2537
2538
2539size_t MetaspaceAux::_capacity_words[] = {0, 0};
2540size_t MetaspaceAux::_used_words[] = {0, 0};
2541
2542size_t MetaspaceAux::free_bytes(Metaspace::MetadataType mdtype) {
2543  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2544  return list == NULL ? 0 : list->free_bytes();
2545}
2546
2547size_t MetaspaceAux::free_bytes() {
2548  return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType);
2549}
2550
2551void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
2552  assert_lock_strong(SpaceManager::expand_lock());
2553  assert(words <= capacity_words(mdtype),
2554         "About to decrement below 0: words " SIZE_FORMAT
2555         " is greater than _capacity_words[%u] " SIZE_FORMAT,
2556         words, mdtype, capacity_words(mdtype));
2557  _capacity_words[mdtype] -= words;
2558}
2559
2560void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
2561  assert_lock_strong(SpaceManager::expand_lock());
2562  // Needs to be atomic
2563  _capacity_words[mdtype] += words;
2564}
2565
2566void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) {
2567  assert(words <= used_words(mdtype),
2568         "About to decrement below 0: words " SIZE_FORMAT
2569         " is greater than _used_words[%u] " SIZE_FORMAT,
2570         words, mdtype, used_words(mdtype));
2571  // For CMS deallocation of the Metaspaces occurs during the
2572  // sweep which is a concurrent phase.  Protection by the expand_lock()
2573  // is not enough since allocation is on a per Metaspace basis
2574  // and protected by the Metaspace lock.
2575  jlong minus_words = (jlong) - (jlong) words;
2576  Atomic::add_ptr(minus_words, &_used_words[mdtype]);
2577}
2578
2579void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) {
2580  // _used_words tracks allocations for
2581  // each piece of metadata.  Those allocations are
2582  // generally done concurrently by different application
2583  // threads so must be done atomically.
2584  Atomic::add_ptr(words, &_used_words[mdtype]);
2585}
2586
2587size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) {
2588  size_t used = 0;
2589  ClassLoaderDataGraphMetaspaceIterator iter;
2590  while (iter.repeat()) {
2591    Metaspace* msp = iter.get_next();
2592    // Sum allocated_blocks_words for each metaspace
2593    if (msp != NULL) {
2594      used += msp->used_words_slow(mdtype);
2595    }
2596  }
2597  return used * BytesPerWord;
2598}
2599
2600size_t MetaspaceAux::free_bytes_slow(Metaspace::MetadataType mdtype) {
2601  size_t free = 0;
2602  ClassLoaderDataGraphMetaspaceIterator iter;
2603  while (iter.repeat()) {
2604    Metaspace* msp = iter.get_next();
2605    if (msp != NULL) {
2606      free += msp->free_words_slow(mdtype);
2607    }
2608  }
2609  return free * BytesPerWord;
2610}
2611
2612size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
2613  if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) {
2614    return 0;
2615  }
2616  // Don't count the space in the freelists.  That space will be
2617  // added to the capacity calculation as needed.
2618  size_t capacity = 0;
2619  ClassLoaderDataGraphMetaspaceIterator iter;
2620  while (iter.repeat()) {
2621    Metaspace* msp = iter.get_next();
2622    if (msp != NULL) {
2623      capacity += msp->capacity_words_slow(mdtype);
2624    }
2625  }
2626  return capacity * BytesPerWord;
2627}
2628
2629size_t MetaspaceAux::capacity_bytes_slow() {
2630#ifdef PRODUCT
2631  // Use capacity_bytes() in PRODUCT instead of this function.
2632  guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
2633#endif
2634  size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
2635  size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
2636  assert(capacity_bytes() == class_capacity + non_class_capacity,
2637         "bad accounting: capacity_bytes() " SIZE_FORMAT
2638         " class_capacity + non_class_capacity " SIZE_FORMAT
2639         " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
2640         capacity_bytes(), class_capacity + non_class_capacity,
2641         class_capacity, non_class_capacity);
2642
2643  return class_capacity + non_class_capacity;
2644}
2645
2646size_t MetaspaceAux::reserved_bytes(Metaspace::MetadataType mdtype) {
2647  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2648  return list == NULL ? 0 : list->reserved_bytes();
2649}
2650
2651size_t MetaspaceAux::committed_bytes(Metaspace::MetadataType mdtype) {
2652  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2653  return list == NULL ? 0 : list->committed_bytes();
2654}
2655
2656size_t MetaspaceAux::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
2657
2658size_t MetaspaceAux::free_chunks_total_words(Metaspace::MetadataType mdtype) {
2659  ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
2660  if (chunk_manager == NULL) {
2661    return 0;
2662  }
2663  chunk_manager->slow_verify();
2664  return chunk_manager->free_chunks_total_words();
2665}
2666
2667size_t MetaspaceAux::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
2668  return free_chunks_total_words(mdtype) * BytesPerWord;
2669}
2670
2671size_t MetaspaceAux::free_chunks_total_words() {
2672  return free_chunks_total_words(Metaspace::ClassType) +
2673         free_chunks_total_words(Metaspace::NonClassType);
2674}
2675
2676size_t MetaspaceAux::free_chunks_total_bytes() {
2677  return free_chunks_total_words() * BytesPerWord;
2678}
2679
2680bool MetaspaceAux::has_chunk_free_list(Metaspace::MetadataType mdtype) {
2681  return Metaspace::get_chunk_manager(mdtype) != NULL;
2682}
2683
2684MetaspaceChunkFreeListSummary MetaspaceAux::chunk_free_list_summary(Metaspace::MetadataType mdtype) {
2685  if (!has_chunk_free_list(mdtype)) {
2686    return MetaspaceChunkFreeListSummary();
2687  }
2688
2689  const ChunkManager* cm = Metaspace::get_chunk_manager(mdtype);
2690  return cm->chunk_free_list_summary();
2691}
2692
2693void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
2694  log_info(gc, metaspace)("Metaspace: "  SIZE_FORMAT "K->" SIZE_FORMAT "K("  SIZE_FORMAT "K)",
2695                          prev_metadata_used/K, used_bytes()/K, reserved_bytes()/K);
2696}
2697
2698void MetaspaceAux::print_on(outputStream* out) {
2699  Metaspace::MetadataType nct = Metaspace::NonClassType;
2700
2701  out->print_cr(" Metaspace       "
2702                "used "      SIZE_FORMAT "K, "
2703                "capacity "  SIZE_FORMAT "K, "
2704                "committed " SIZE_FORMAT "K, "
2705                "reserved "  SIZE_FORMAT "K",
2706                used_bytes()/K,
2707                capacity_bytes()/K,
2708                committed_bytes()/K,
2709                reserved_bytes()/K);
2710
2711  if (Metaspace::using_class_space()) {
2712    Metaspace::MetadataType ct = Metaspace::ClassType;
2713    out->print_cr("  class space    "
2714                  "used "      SIZE_FORMAT "K, "
2715                  "capacity "  SIZE_FORMAT "K, "
2716                  "committed " SIZE_FORMAT "K, "
2717                  "reserved "  SIZE_FORMAT "K",
2718                  used_bytes(ct)/K,
2719                  capacity_bytes(ct)/K,
2720                  committed_bytes(ct)/K,
2721                  reserved_bytes(ct)/K);
2722  }
2723}
2724
2725// Print information for class space and data space separately.
2726// This is almost the same as above.
2727void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
2728  size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype);
2729  size_t capacity_bytes = capacity_bytes_slow(mdtype);
2730  size_t used_bytes = used_bytes_slow(mdtype);
2731  size_t free_bytes = free_bytes_slow(mdtype);
2732  size_t used_and_free = used_bytes + free_bytes +
2733                           free_chunks_capacity_bytes;
2734  out->print_cr("  Chunk accounting: used in chunks " SIZE_FORMAT
2735             "K + unused in chunks " SIZE_FORMAT "K  + "
2736             " capacity in free chunks " SIZE_FORMAT "K = " SIZE_FORMAT
2737             "K  capacity in allocated chunks " SIZE_FORMAT "K",
2738             used_bytes / K,
2739             free_bytes / K,
2740             free_chunks_capacity_bytes / K,
2741             used_and_free / K,
2742             capacity_bytes / K);
2743  // Accounting can only be correct if we got the values during a safepoint
2744  assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong");
2745}
2746
2747// Print total fragmentation for class metaspaces
2748void MetaspaceAux::print_class_waste(outputStream* out) {
2749  assert(Metaspace::using_class_space(), "class metaspace not used");
2750  size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0;
2751  size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0;
2752  ClassLoaderDataGraphMetaspaceIterator iter;
2753  while (iter.repeat()) {
2754    Metaspace* msp = iter.get_next();
2755    if (msp != NULL) {
2756      cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
2757      cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
2758      cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
2759      cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
2760      cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
2761      cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
2762      cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
2763    }
2764  }
2765  out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
2766                SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
2767                SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
2768                "large count " SIZE_FORMAT,
2769                cls_specialized_count, cls_specialized_waste,
2770                cls_small_count, cls_small_waste,
2771                cls_medium_count, cls_medium_waste, cls_humongous_count);
2772}
2773
2774// Print total fragmentation for data and class metaspaces separately
2775void MetaspaceAux::print_waste(outputStream* out) {
2776  size_t specialized_waste = 0, small_waste = 0, medium_waste = 0;
2777  size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0;
2778
2779  ClassLoaderDataGraphMetaspaceIterator iter;
2780  while (iter.repeat()) {
2781    Metaspace* msp = iter.get_next();
2782    if (msp != NULL) {
2783      specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
2784      specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
2785      small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex);
2786      small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex);
2787      medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex);
2788      medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex);
2789      humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex);
2790    }
2791  }
2792  out->print_cr("Total fragmentation waste (words) doesn't count free space");
2793  out->print_cr("  data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
2794                        SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
2795                        SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
2796                        "large count " SIZE_FORMAT,
2797             specialized_count, specialized_waste, small_count,
2798             small_waste, medium_count, medium_waste, humongous_count);
2799  if (Metaspace::using_class_space()) {
2800    print_class_waste(out);
2801  }
2802}
2803
2804// Dump global metaspace things from the end of ClassLoaderDataGraph
2805void MetaspaceAux::dump(outputStream* out) {
2806  out->print_cr("All Metaspace:");
2807  out->print("data space: "); print_on(out, Metaspace::NonClassType);
2808  out->print("class space: "); print_on(out, Metaspace::ClassType);
2809  print_waste(out);
2810}
2811
2812void MetaspaceAux::verify_free_chunks() {
2813  Metaspace::chunk_manager_metadata()->verify();
2814  if (Metaspace::using_class_space()) {
2815    Metaspace::chunk_manager_class()->verify();
2816  }
2817}
2818
2819void MetaspaceAux::verify_capacity() {
2820#ifdef ASSERT
2821  size_t running_sum_capacity_bytes = capacity_bytes();
2822  // For purposes of the running sum of capacity, verify against capacity
2823  size_t capacity_in_use_bytes = capacity_bytes_slow();
2824  assert(running_sum_capacity_bytes == capacity_in_use_bytes,
2825         "capacity_words() * BytesPerWord " SIZE_FORMAT
2826         " capacity_bytes_slow()" SIZE_FORMAT,
2827         running_sum_capacity_bytes, capacity_in_use_bytes);
2828  for (Metaspace::MetadataType i = Metaspace::ClassType;
2829       i < Metaspace::MetadataTypeCount;
2830       i = (Metaspace::MetadataType)(i + 1)) {
2831    size_t capacity_in_use_bytes = capacity_bytes_slow(i);
2832    assert(capacity_bytes(i) == capacity_in_use_bytes,
2833           "capacity_bytes(%u) " SIZE_FORMAT
2834           " capacity_bytes_slow(%u)" SIZE_FORMAT,
2835           i, capacity_bytes(i), i, capacity_in_use_bytes);
2836  }
2837#endif
2838}
2839
2840void MetaspaceAux::verify_used() {
2841#ifdef ASSERT
2842  size_t running_sum_used_bytes = used_bytes();
2843  // For purposes of the running sum of used, verify against used
2844  size_t used_in_use_bytes = used_bytes_slow();
2845  assert(used_bytes() == used_in_use_bytes,
2846         "used_bytes() " SIZE_FORMAT
2847         " used_bytes_slow()" SIZE_FORMAT,
2848         used_bytes(), used_in_use_bytes);
2849  for (Metaspace::MetadataType i = Metaspace::ClassType;
2850       i < Metaspace::MetadataTypeCount;
2851       i = (Metaspace::MetadataType)(i + 1)) {
2852    size_t used_in_use_bytes = used_bytes_slow(i);
2853    assert(used_bytes(i) == used_in_use_bytes,
2854           "used_bytes(%u) " SIZE_FORMAT
2855           " used_bytes_slow(%u)" SIZE_FORMAT,
2856           i, used_bytes(i), i, used_in_use_bytes);
2857  }
2858#endif
2859}
2860
2861void MetaspaceAux::verify_metrics() {
2862  verify_capacity();
2863  verify_used();
2864}
2865
2866
2867// Metaspace methods
2868
2869size_t Metaspace::_first_chunk_word_size = 0;
2870size_t Metaspace::_first_class_chunk_word_size = 0;
2871
2872size_t Metaspace::_commit_alignment = 0;
2873size_t Metaspace::_reserve_alignment = 0;
2874
2875Metaspace::Metaspace(Mutex* lock, MetaspaceType type) {
2876  initialize(lock, type);
2877}
2878
2879Metaspace::~Metaspace() {
2880  delete _vsm;
2881  if (using_class_space()) {
2882    delete _class_vsm;
2883  }
2884}
2885
2886VirtualSpaceList* Metaspace::_space_list = NULL;
2887VirtualSpaceList* Metaspace::_class_space_list = NULL;
2888
2889ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
2890ChunkManager* Metaspace::_chunk_manager_class = NULL;
2891
2892#define VIRTUALSPACEMULTIPLIER 2
2893
2894#ifdef _LP64
2895static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
2896
2897void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
2898  // Figure out the narrow_klass_base and the narrow_klass_shift.  The
2899  // narrow_klass_base is the lower of the metaspace base and the cds base
2900  // (if cds is enabled).  The narrow_klass_shift depends on the distance
2901  // between the lower base and higher address.
2902  address lower_base;
2903  address higher_address;
2904#if INCLUDE_CDS
2905  if (UseSharedSpaces) {
2906    higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
2907                          (address)(metaspace_base + compressed_class_space_size()));
2908    lower_base = MIN2(metaspace_base, cds_base);
2909  } else
2910#endif
2911  {
2912    higher_address = metaspace_base + compressed_class_space_size();
2913    lower_base = metaspace_base;
2914
2915    uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;
2916    // If compressed class space fits in lower 32G, we don't need a base.
2917    if (higher_address <= (address)klass_encoding_max) {
2918      lower_base = 0; // Effectively lower base is zero.
2919    }
2920  }
2921
2922  Universe::set_narrow_klass_base(lower_base);
2923
2924  if ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) {
2925    Universe::set_narrow_klass_shift(0);
2926  } else {
2927    assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces");
2928    Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
2929  }
2930}
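// Illustrative sketch (not part of the original source): with the base and
// shift chosen above, decoding a narrowKlass value back into a Klass* is
// conceptually:
//
//   Klass* decode_klass_sketch(narrowKlass nk) {
//     return (Klass*)((uintptr_t)Universe::narrow_klass_base() +
//                     ((uintptr_t)nk << Universe::narrow_klass_shift()));
//   }
//
// When both the base and the shift are zero, the "decode" degenerates to a
// plain widening cast.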
2931
2932#if INCLUDE_CDS
2933// Return TRUE if the specified metaspace_base and cds_base are close enough
2934// to work with compressed klass pointers.
2935bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
2936  assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
2937  assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
2938  address lower_base = MIN2((address)metaspace_base, cds_base);
2939  address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
2940                                (address)(metaspace_base + compressed_class_space_size()));
2941  return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
2942}
2943#endif
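// Worked example (illustrative numbers only): with a CDS archive mapped at
// 0x7_0000_0000 spanning 100M and a 1G compressed class space requested at
// 0x7_4000_0000, the covered range is [0x7_0000_0000, 0x7_8000_0000), i.e. 2G,
// which is within the 4G UnscaledClassSpaceMax limit, so both can share one
// narrow-klass encoding. Had the class space been requested 5G above the
// archive, the range would span 6G and the check would fail.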
2944
2945// Try to allocate the metaspace at the requested addr.
2946void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
2947  assert(using_class_space(), "called improperly");
2948  assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
2949  assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
2950         "Metaspace size is too big");
2951  assert_is_ptr_aligned(requested_addr, _reserve_alignment);
2952  assert_is_ptr_aligned(cds_base, _reserve_alignment);
2953  assert_is_size_aligned(compressed_class_space_size(), _reserve_alignment);
2954
2955  // Don't use large pages for the class space.
2956  bool large_pages = false;
2957
2958#ifndef AARCH64
2959  ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
2960                                             _reserve_alignment,
2961                                             large_pages,
2962                                             requested_addr);
2963#else // AARCH64
2964  ReservedSpace metaspace_rs;
2965
2966  // Our compressed klass pointers may fit nicely into the lower 32
2967  // bits.
2968  if ((uint64_t)requested_addr + compressed_class_space_size() < 4*G) {
2969    metaspace_rs = ReservedSpace(compressed_class_space_size(),
2970                                             _reserve_alignment,
2971                                             large_pages,
2972                                             requested_addr);
2973  }
2974
2975  if (!metaspace_rs.is_reserved()) {
2976    // Try to align metaspace so that we can decode a compressed klass
2977    // with a single MOVK instruction.  We can do this iff the
2978    // compressed class base is a multiple of 4G.
2979    for (char *a = (char*)align_ptr_up(requested_addr, 4*G);
2980         a < (char*)(1024*G);
2981         a += 4*G) {
2982
2983#if INCLUDE_CDS
2984      if (UseSharedSpaces
2985          && !can_use_cds_with_metaspace_addr(a, cds_base)) {
2986        // We failed to find an aligned base that will reach.  Fall
2987        // back to using our requested addr.
2988        metaspace_rs = ReservedSpace(compressed_class_space_size(),
2989                                     _reserve_alignment,
2990                                     large_pages,
2991                                     requested_addr);
2992        break;
2993      }
2994#endif
2995
2996      metaspace_rs = ReservedSpace(compressed_class_space_size(),
2997                                   _reserve_alignment,
2998                                   large_pages,
2999                                   a);
3000      if (metaspace_rs.is_reserved())
3001        break;
3002    }
3003  }
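  // Illustrative note (not part of the original source): with a base that is a
  // multiple of 4G and, in the common case, a zero shift, decoding a narrow
  // klass already held zero-extended in, say, x10 needs a single instruction,
  // because the low 32 bits of the result are the narrow value itself:
  //
  //   movk  x10, #0x7, lsl #32    // e.g. for base == 0x7_0000_0000
  //
  // That is why the loop above only probes 4G-aligned addresses.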
3004
3005#endif // AARCH64
3006
3007  if (!metaspace_rs.is_reserved()) {
3008#if INCLUDE_CDS
3009    if (UseSharedSpaces) {
3010      size_t increment = align_size_up(1*G, _reserve_alignment);
3011
3012      // Keep trying to allocate the metaspace, increasing the requested_addr
3013      // by 1GB each time, until we reach an address that will no longer allow
3014      // use of CDS with compressed klass pointers.
3015      char *addr = requested_addr;
3016      while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
3017             can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
3018        addr = addr + increment;
3019        metaspace_rs = ReservedSpace(compressed_class_space_size(),
3020                                     _reserve_alignment, large_pages, addr);
3021      }
3022    }
3023#endif
3024    // If no successful allocation then try to allocate the space anywhere.  If
3025    // that fails then OOM doom.  At this point we cannot try allocating the
3026    // metaspace as if UseCompressedClassPointers is off because too much
3027    // initialization has happened that depends on UseCompressedClassPointers.
3028    // So, UseCompressedClassPointers cannot be turned off at this point.
3029    if (!metaspace_rs.is_reserved()) {
3030      metaspace_rs = ReservedSpace(compressed_class_space_size(),
3031                                   _reserve_alignment, large_pages);
3032      if (!metaspace_rs.is_reserved()) {
3033        vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes",
3034                                              compressed_class_space_size()));
3035      }
3036    }
3037  }
3038
3039  // If we got here then the metaspace got allocated.
3040  MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
3041
3042#if INCLUDE_CDS
3043  // Verify that we can use shared spaces.  Otherwise, turn off CDS.
3044  if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
3045    FileMapInfo::stop_sharing_and_unmap(
3046        "Could not allocate metaspace at a compatible address");
3047  }
3048#endif
3049  set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
3050                                  UseSharedSpaces ? (address)cds_base : 0);
3051
3052  initialize_class_space(metaspace_rs);
3053
3054  if (develop_log_is_enabled(Trace, gc, metaspace)) {
3055    LogHandle(gc, metaspace) log;
3056    ResourceMark rm;
3057    print_compressed_class_space(log.trace_stream(), requested_addr);
3058  }
3059}
3060
3061void Metaspace::print_compressed_class_space(outputStream* st, const char* requested_addr) {
3062  st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
3063               p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
3064  if (_class_space_list != NULL) {
3065    address base = (address)_class_space_list->current_virtual_space()->bottom();
3066    st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT,
3067                 compressed_class_space_size(), p2i(base));
3068    if (requested_addr != 0) {
3069      st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr));
3070    }
3071    st->cr();
3072  }
3073}
3074
3075// For UseCompressedClassPointers the class space is reserved above the top of
3076// the Java heap.  The argument passed in is at the base of the compressed space.
3077void Metaspace::initialize_class_space(ReservedSpace rs) {
3078  // The reserved space size may be bigger because of alignment, esp with UseLargePages
3079  assert(rs.size() >= CompressedClassSpaceSize,
3080         SIZE_FORMAT " != " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize);
3081  assert(using_class_space(), "Must be using class space");
3082  _class_space_list = new VirtualSpaceList(rs);
3083  _chunk_manager_class = new ChunkManager(SpecializedChunk, ClassSmallChunk, ClassMediumChunk);
3084
3085  if (!_class_space_list->initialization_succeeded()) {
3086    vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
3087  }
3088}
3089
3090#endif
3091
3092void Metaspace::ergo_initialize() {
3093  if (DumpSharedSpaces) {
3094    // Using large pages when dumping the shared archive is currently not implemented.
3095    FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
3096  }
3097
3098  size_t page_size = os::vm_page_size();
3099  if (UseLargePages && UseLargePagesInMetaspace) {
3100    page_size = os::large_page_size();
3101  }
3102
3103  _commit_alignment  = page_size;
3104  _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
3105
3106  // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since this will
3107  // override if MaxMetaspaceSize was set on the command line or not.
3108  // This information is needed later to conform to the specification of the
3109  // java.lang.management.MemoryUsage API.
3110  //
3111  // Ideally, we would be able to set the default value of MaxMetaspaceSize in
3112  // globals.hpp to the aligned value, but this is not possible, since the
3113  // alignment depends on other flags being parsed.
3114  MaxMetaspaceSize = align_size_down_bounded(MaxMetaspaceSize, _reserve_alignment);
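  // For illustration (assuming align_size_down_bounded rounds down but never
  // below the alignment itself): on a platform with a 64K allocation
  // granularity, -XX:MaxMetaspaceSize=100000K is trimmed to 99968K (1562 * 64K),
  // while a pathological value smaller than 64K would be bumped up to 64K.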
3115
3116  if (MetaspaceSize > MaxMetaspaceSize) {
3117    MetaspaceSize = MaxMetaspaceSize;
3118  }
3119
3120  MetaspaceSize = align_size_down_bounded(MetaspaceSize, _commit_alignment);
3121
3122  assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");
3123
3124  if (MetaspaceSize < 256*K) {
3125    vm_exit_during_initialization("Too small initial Metaspace size");
3126  }
3127
3128  MinMetaspaceExpansion = align_size_down_bounded(MinMetaspaceExpansion, _commit_alignment);
3129  MaxMetaspaceExpansion = align_size_down_bounded(MaxMetaspaceExpansion, _commit_alignment);
3130
3131  CompressedClassSpaceSize = align_size_down_bounded(CompressedClassSpaceSize, _reserve_alignment);
3132  set_compressed_class_space_size(CompressedClassSpaceSize);
3133}
3134
3135void Metaspace::global_initialize() {
3136  MetaspaceGC::initialize();
3137
3138  // Initialize the alignment for shared spaces.
3139  int max_alignment = os::vm_allocation_granularity();
3140  size_t cds_total = 0;
3141
3142  MetaspaceShared::set_max_alignment(max_alignment);
3143
3144  if (DumpSharedSpaces) {
3145#if INCLUDE_CDS
3146    MetaspaceShared::estimate_regions_size();
3147
3148    SharedReadOnlySize  = align_size_up(SharedReadOnlySize,  max_alignment);
3149    SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
3150    SharedMiscDataSize  = align_size_up(SharedMiscDataSize,  max_alignment);
3151    SharedMiscCodeSize  = align_size_up(SharedMiscCodeSize,  max_alignment);
3152
3153    // Initialize with the sum of the shared space sizes.  The read-only
3154    // and read write metaspace chunks will be allocated out of this and the
3155    // remainder is the misc code and data chunks.
3156    cds_total = FileMapInfo::shared_spaces_size();
3157    cds_total = align_size_up(cds_total, _reserve_alignment);
3158    _space_list = new VirtualSpaceList(cds_total/wordSize);
3159    _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
3160
3161    if (!_space_list->initialization_succeeded()) {
3162      vm_exit_during_initialization("Unable to dump shared archive.", NULL);
3163    }
3164
3165#ifdef _LP64
3166    if (cds_total + compressed_class_space_size() > UnscaledClassSpaceMax) {
3167      vm_exit_during_initialization("Unable to dump shared archive.",
3168          err_msg("Size of archive (" SIZE_FORMAT ") + compressed class space ("
3169                  SIZE_FORMAT ") == total (" SIZE_FORMAT ") is larger than compressed "
3170                  "klass limit: " UINT64_FORMAT, cds_total, compressed_class_space_size(),
3171                  cds_total + compressed_class_space_size(), UnscaledClassSpaceMax));
3172    }
3173
3174    // Set the compressed klass pointer base so that decoding of these pointers works
3175    // properly when creating the shared archive.
3176    assert(UseCompressedOops && UseCompressedClassPointers,
3177      "UseCompressedOops and UseCompressedClassPointers must be set");
3178    Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
3179    log_develop_trace(gc, metaspace)("Setting narrow_klass_base to Address: " PTR_FORMAT,
3180                                     p2i(_space_list->current_virtual_space()->bottom()));
3181
3182    Universe::set_narrow_klass_shift(0);
3183#endif // _LP64
3184#endif // INCLUDE_CDS
3185  } else {
3186    // If using shared space, open the file that contains the shared space
3187    // and map in the memory before initializing the rest of metaspace (so
3188    // the addresses don't conflict)
3189    address cds_address = NULL;
3190    if (UseSharedSpaces) {
3191#if INCLUDE_CDS
3192      FileMapInfo* mapinfo = new FileMapInfo();
3193
3194      // Open the shared archive file, read and validate the header. If
3195      // initialization fails, shared spaces [UseSharedSpaces] are
3196      // disabled and the file is closed.
3197      // Map in spaces now also
3198      if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
3199        cds_total = FileMapInfo::shared_spaces_size();
3200        cds_address = (address)mapinfo->header()->region_addr(0);
3201#ifdef _LP64
3202        if (using_class_space()) {
3203          char* cds_end = (char*)(cds_address + cds_total);
3204          cds_end = (char *)align_ptr_up(cds_end, _reserve_alignment);
3205          // If UseCompressedClassPointers is set then allocate the metaspace area
3206          // above the heap and above the CDS area (if it exists).
3207          allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
3208          // Map the shared string space after compressed pointers
3209          // because it relies on compressed class pointers setting to work
3210          mapinfo->map_string_regions();
3211        }
3212#endif // _LP64
3213      } else {
3214        assert(!mapinfo->is_open() && !UseSharedSpaces,
3215               "archive file not closed or shared spaces not disabled.");
3216      }
3217#endif // INCLUDE_CDS
3218    }
3219
3220#ifdef _LP64
3221    if (!UseSharedSpaces && using_class_space()) {
3222      char* base = (char*)align_ptr_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
3223      allocate_metaspace_compressed_klass_ptrs(base, 0);
3224    }
3225#endif // _LP64
3226
3227    // Initialize these before initializing the VirtualSpaceList
3228    _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
3229    _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
3230    // Make the first class chunk bigger than a medium chunk so it's not put
3231    // on the medium chunk list.  The next chunk will be small and progress
3232    // from there.  This size was calculated by running -version.
3233    _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
3234                                       (CompressedClassSpaceSize/BytesPerWord)*2);
3235    _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
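    // For illustration (assuming the default 1G CompressedClassSpaceSize and a
    // 64-bit word size): MIN2 compares MediumChunk * 6 == 48K words against
    // (1G / 8) * 2 == 256M words and picks 48K words (384K bytes), which is
    // well above the 4K-word ClassMediumChunk and therefore stays off the
    // medium chunk list.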
3236    // Arbitrarily set the initial virtual space to a multiple
3237    // of the boot class loader size.
3238    size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
3239    word_size = align_size_up(word_size, Metaspace::reserve_alignment_words());
3240
3241    // Initialize the list of virtual spaces.
3242    _space_list = new VirtualSpaceList(word_size);
3243    _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
3244
3245    if (!_space_list->initialization_succeeded()) {
3246      vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
3247    }
3248  }
3249
3250  _tracer = new MetaspaceTracer();
3251}
3252
3253void Metaspace::post_initialize() {
3254  MetaspaceGC::post_initialize();
3255}
3256
3257Metachunk* Metaspace::get_initialization_chunk(MetadataType mdtype,
3258                                               size_t chunk_word_size,
3259                                               size_t chunk_bunch) {
3260  // Get a chunk from the chunk freelist
3261  Metachunk* chunk = get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
3262  if (chunk != NULL) {
3263    return chunk;
3264  }
3265
3266  return get_space_list(mdtype)->get_new_chunk(chunk_word_size, chunk_word_size, chunk_bunch);
3267}
3268
3269void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
3270
3271  assert(space_list() != NULL,
3272    "Metadata VirtualSpaceList has not been initialized");
3273  assert(chunk_manager_metadata() != NULL,
3274    "Metadata ChunkManager has not been initialized");
3275
3276  _vsm = new SpaceManager(NonClassType, lock);
3277  if (_vsm == NULL) {
3278    return;
3279  }
3280  size_t word_size;
3281  size_t class_word_size;
3282  vsm()->get_initial_chunk_sizes(type, &word_size, &class_word_size);
3283
3284  if (using_class_space()) {
3285    assert(class_space_list() != NULL,
3286      "Class VirtualSpaceList has not been initialized");
3287    assert(chunk_manager_class() != NULL,
3288      "Class ChunkManager has not been initialized");
3289
3290    // Allocate SpaceManager for classes.
3291    _class_vsm = new SpaceManager(ClassType, lock);
3292    if (_class_vsm == NULL) {
3293      return;
3294    }
3295  }
3296
3297  MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3298
3299  // Allocate chunk for metadata objects
3300  Metachunk* new_chunk = get_initialization_chunk(NonClassType,
3301                                                  word_size,
3302                                                  vsm()->medium_chunk_bunch());
3303  // For dumping shared archive, report error if allocation has failed.
3304  if (DumpSharedSpaces && new_chunk == NULL) {
3305    report_insufficient_metaspace(MetaspaceAux::committed_bytes() + word_size * BytesPerWord);
3306  }
3307  assert(!DumpSharedSpaces || new_chunk != NULL, "should have enough space for both chunks");
3308  if (new_chunk != NULL) {
3309    // Add to this manager's list of chunks in use and current_chunk().
3310    vsm()->add_chunk(new_chunk, true);
3311  }
3312
3313  // Allocate chunk for class metadata objects
3314  if (using_class_space()) {
3315    Metachunk* class_chunk = get_initialization_chunk(ClassType,
3316                                                      class_word_size,
3317                                                      class_vsm()->medium_chunk_bunch());
3318    if (class_chunk != NULL) {
3319      class_vsm()->add_chunk(class_chunk, true);
3320    } else {
3321      // For dumping shared archive, report error if allocation has failed.
3322      if (DumpSharedSpaces) {
3323        report_insufficient_metaspace(MetaspaceAux::committed_bytes() + class_word_size * BytesPerWord);
3324      }
3325    }
3326  }
3327
3328  _alloc_record_head = NULL;
3329  _alloc_record_tail = NULL;
3330}
3331
3332size_t Metaspace::align_word_size_up(size_t word_size) {
3333  size_t byte_size = word_size * wordSize;
3334  return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
3335}
3336
3337MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
3338  // DumpSharedSpaces doesn't use class metadata area (yet)
3339  // Also, don't use class_vsm() unless UseCompressedClassPointers is true.
3340  if (is_class_space_allocation(mdtype)) {
3341    return class_vsm()->allocate(word_size);
3342  } else {
3343    return vsm()->allocate(word_size);
3344  }
3345}
3346
3347MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
3348  size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
3349  assert(delta_bytes > 0, "Must be");
3350
3351  size_t before = 0;
3352  size_t after = 0;
3353  MetaWord* res;
3354  bool incremented;
3355
3356  // Each thread increments the HWM at most once. Even if the thread fails to increment
3357  // the HWM, an allocation is still attempted. This is because another thread must then
3358  // have incremented the HWM and therefore the allocation might still succeed.
3359  do {
3360    incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before);
3361    res = allocate(word_size, mdtype);
3362  } while (!incremented && res == NULL);
3363
3364  if (incremented) {
3365    tracer()->report_gc_threshold(before, after,
3366                                  MetaspaceGCThresholdUpdater::ExpandAndAllocate);
3367    log_trace(gc, metaspace)("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before, after);
3368  }
3369
3370  return res;
3371}
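// Illustrative interleaving (not from the original source) of the loop above,
// with two threads that both need to expand:
//
//   T1: inc_capacity_until_GC(delta) -> succeeds; allocate() -> succeeds; done
//   T2: inc_capacity_until_GC(delta) -> loses the race (incremented == false),
//       but allocate() is still attempted and typically succeeds against the
//       capacity T1 just added; only if it fails does T2 loop and try again.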
3372
3373// Space allocated in the Metaspace.  This may
3374// be across several metadata virtual spaces.
3375char* Metaspace::bottom() const {
3376  assert(DumpSharedSpaces, "only useful and valid for dumping shared spaces");
3377  return (char*)vsm()->current_chunk()->bottom();
3378}
3379
3380size_t Metaspace::used_words_slow(MetadataType mdtype) const {
3381  if (mdtype == ClassType) {
3382    return using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
3383  } else {
3384    return vsm()->sum_used_in_chunks_in_use();  // includes overhead!
3385  }
3386}
3387
3388size_t Metaspace::free_words_slow(MetadataType mdtype) const {
3389  if (mdtype == ClassType) {
3390    return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
3391  } else {
3392    return vsm()->sum_free_in_chunks_in_use();
3393  }
3394}
3395
3396// Space capacity in the Metaspace.  It includes
3397// space in the list of chunks from which allocations
3398// have been made. Don't include space in the global freelist and
3399// in the space available in the dictionary which
3400// is already counted in some chunk.
3401size_t Metaspace::capacity_words_slow(MetadataType mdtype) const {
3402  if (mdtype == ClassType) {
3403    return using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0;
3404  } else {
3405    return vsm()->sum_capacity_in_chunks_in_use();
3406  }
3407}
3408
3409size_t Metaspace::used_bytes_slow(MetadataType mdtype) const {
3410  return used_words_slow(mdtype) * BytesPerWord;
3411}
3412
3413size_t Metaspace::capacity_bytes_slow(MetadataType mdtype) const {
3414  return capacity_words_slow(mdtype) * BytesPerWord;
3415}
3416
3417size_t Metaspace::allocated_blocks_bytes() const {
3418  return vsm()->allocated_blocks_bytes() +
3419      (using_class_space() ? class_vsm()->allocated_blocks_bytes() : 0);
3420}
3421
3422size_t Metaspace::allocated_chunks_bytes() const {
3423  return vsm()->allocated_chunks_bytes() +
3424      (using_class_space() ? class_vsm()->allocated_chunks_bytes() : 0);
3425}
3426
3427void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
3428  assert(!SafepointSynchronize::is_at_safepoint()
3429         || Thread::current()->is_VM_thread(), "should be the VM thread");
3430
3431  if (DumpSharedSpaces && PrintSharedSpaces) {
3432    record_deallocation(ptr, vsm()->get_raw_word_size(word_size));
3433  }
3434
3435  MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
3436
3437  if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
3438    // Dark matter.  Too small for dictionary.
3439#ifdef ASSERT
3440    Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
3441#endif
3442    return;
3443  }
3444  if (is_class && using_class_space()) {
3445    class_vsm()->deallocate(ptr, word_size);
3446  } else {
3447    vsm()->deallocate(ptr, word_size);
3448  }
3449}
3450
3451
3452MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
3453                              bool read_only, MetaspaceObj::Type type, TRAPS) {
3454  if (HAS_PENDING_EXCEPTION) {
3455    assert(false, "Should not allocate with exception pending");
3456    return NULL;  // caller does a CHECK_NULL too
3457  }
3458
3459  assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
3460        "ClassLoaderData::the_null_class_loader_data() should have been used.");
3461
3462  // Allocate in metaspaces without taking out a lock, because it deadlocks
3463  // with the SymbolTable_lock.  Dumping is single threaded for now.  We'll have
3464  // to revisit this for application class data sharing.
3465  if (DumpSharedSpaces) {
3466    assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity");
3467    Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace();
3468    MetaWord* result = space->allocate(word_size, NonClassType);
3469    if (result == NULL) {
3470      report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
3471    }
3472    if (PrintSharedSpaces) {
3473      space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));
3474    }
3475
3476    // Zero initialize.
3477    Copy::fill_to_words((HeapWord*)result, word_size, 0);
3478
3479    return result;
3480  }
3481
3482  MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
3483
3484  // Try to allocate metadata.
3485  MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
3486
3487  if (result == NULL) {
3488    tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype);
3489
3490    // Allocation failed.
3491    if (is_init_completed()) {
3492      // Only start a GC if the bootstrapping has completed.
3493
3494      // Try to clean out some memory and retry.
3495      result = Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
3496          loader_data, word_size, mdtype);
3497    }
3498  }
3499
3500  if (result == NULL) {
3501    SpaceManager* sm;
3502    if (is_class_space_allocation(mdtype)) {
3503      sm = loader_data->metaspace_non_null()->class_vsm();
3504    } else {
3505      sm = loader_data->metaspace_non_null()->vsm();
3506    }
3507
3508    result = sm->get_small_chunk_and_allocate(word_size);
3509
3510    if (result == NULL) {
3511      report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL);
3512    }
3513  }
3514
3515  // Zero initialize.
3516  Copy::fill_to_words((HeapWord*)result, word_size, 0);
3517
3518  return result;
3519}
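// Hypothetical caller sketch (illustrative, not part of the original source):
// metadata such as Method* is normally allocated through MetaspaceObj's
// placement operator new, which funnels into the routine above roughly as:
//
//   MetaWord* p = Metaspace::allocate(loader_data, word_size,
//                                     /* read_only */ false,
//                                     MetaspaceObj::MethodType, CHECK_NULL);
//
// The result is zero-filled; on failure report_metadata_oome() below throws an
// OutOfMemoryError (or exits if initialization is not yet complete).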
3520
3521size_t Metaspace::class_chunk_size(size_t word_size) {
3522  assert(using_class_space(), "Has to use class space");
3523  return class_vsm()->calc_chunk_size(word_size);
3524}
3525
3526void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
3527  tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);
3528
3529  // If result is still null, we are out of memory.
3530  LogHandle(gc, metaspace, freelist) log;
3531  if (log.is_trace()) {
3532    log.trace("Metaspace allocation failed for size " SIZE_FORMAT, word_size);
3533    ResourceMark rm;
3534    outputStream* out = log.trace_stream();
3535    if (loader_data->metaspace_or_null() != NULL) {
3536      loader_data->dump(out);
3537    }
3538    MetaspaceAux::dump(out);
3539  }
3540
3541  bool out_of_compressed_class_space = false;
3542  if (is_class_space_allocation(mdtype)) {
3543    Metaspace* metaspace = loader_data->metaspace_non_null();
3544    out_of_compressed_class_space =
3545      MetaspaceAux::committed_bytes(Metaspace::ClassType) +
3546      (metaspace->class_chunk_size(word_size) * BytesPerWord) >
3547      CompressedClassSpaceSize;
3548  }
3549
3550  // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
3551  const char* space_string = out_of_compressed_class_space ?
3552    "Compressed class space" : "Metaspace";
3553
3554  report_java_out_of_memory(space_string);
3555
3556  if (JvmtiExport::should_post_resource_exhausted()) {
3557    JvmtiExport::post_resource_exhausted(
3558        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
3559        space_string);
3560  }
3561
3562  if (!is_init_completed()) {
3563    vm_exit_during_initialization("OutOfMemoryError", space_string);
3564  }
3565
3566  if (out_of_compressed_class_space) {
3567    THROW_OOP(Universe::out_of_memory_error_class_metaspace());
3568  } else {
3569    THROW_OOP(Universe::out_of_memory_error_metaspace());
3570  }
3571}
3572
3573const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) {
3574  switch (mdtype) {
3575    case Metaspace::ClassType: return "Class";
3576    case Metaspace::NonClassType: return "Metadata";
3577    default:
3578      assert(false, "Got bad mdtype: %d", (int) mdtype);
3579      return NULL;
3580  }
3581}
3582
3583void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
3584  assert(DumpSharedSpaces, "sanity");
3585
3586  int byte_size = (int)word_size * wordSize;
3587  AllocRecord *rec = new AllocRecord((address)ptr, type, byte_size);
3588
3589  if (_alloc_record_head == NULL) {
3590    _alloc_record_head = _alloc_record_tail = rec;
3591  } else if (_alloc_record_tail->_ptr + _alloc_record_tail->_byte_size == (address)ptr) {
3592    _alloc_record_tail->_next = rec;
3593    _alloc_record_tail = rec;
3594  } else {
3595    // slow linear search, but this doesn't happen that often, and only when dumping
3596    for (AllocRecord *old = _alloc_record_head; old; old = old->_next) {
3597      if (old->_ptr == ptr) {
3598        assert(old->_type == MetaspaceObj::DeallocatedType, "sanity");
3599        int remain_bytes = old->_byte_size - byte_size;
3600        assert(remain_bytes >= 0, "sanity");
3601        old->_type = type;
3602
3603        if (remain_bytes == 0) {
3604          delete(rec);
3605        } else {
3606          address remain_ptr = address(ptr) + byte_size;
3607          rec->_ptr = remain_ptr;
3608          rec->_byte_size = remain_bytes;
3609          rec->_type = MetaspaceObj::DeallocatedType;
3610          rec->_next = old->_next;
3611          old->_byte_size = byte_size;
3612          old->_next = rec;
3613        }
3614        return;
3615      }
3616    }
3617    assert(0, "reallocating a freed pointer that was not recorded");
3618  }
3619}
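// Worked example (illustrative only) of the record-splitting branch above:
// suppose a 96-byte record at 0x1000 was previously marked DeallocatedType and
// 32 bytes are now re-allocated at 0x1000. Afterwards:
//
//   old: _ptr = 0x1000, _byte_size = 32, _type = <new type>
//   rec: _ptr = 0x1020, _byte_size = 64, _type = DeallocatedType
//
// and rec is linked in right after old, so the remaining 64 bytes stay
// available for a later re-allocation at 0x1020.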
3620
3621void Metaspace::record_deallocation(void* ptr, size_t word_size) {
3622  assert(DumpSharedSpaces, "sanity");
3623
3624  for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
3625    if (rec->_ptr == ptr) {
3626      assert(rec->_byte_size == (int)word_size * wordSize, "sanity");
3627      rec->_type = MetaspaceObj::DeallocatedType;
3628      return;
3629    }
3630  }
3631
3632  assert(0, "deallocating a pointer that was not recorded");
3633}
3634
3635void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) {
3636  assert(DumpSharedSpaces, "unimplemented for !DumpSharedSpaces");
3637
3638  address last_addr = (address)bottom();
3639
3640  for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
3641    address ptr = rec->_ptr;
3642    if (last_addr < ptr) {
3643      closure->doit(last_addr, MetaspaceObj::UnknownType, ptr - last_addr);
3644    }
3645    closure->doit(ptr, rec->_type, rec->_byte_size);
3646    last_addr = ptr + rec->_byte_size;
3647  }
3648
3649  address top = ((address)bottom()) + used_bytes_slow(Metaspace::NonClassType);
3650  if (last_addr < top) {
3651    closure->doit(last_addr, MetaspaceObj::UnknownType, top - last_addr);
3652  }
3653}
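// Minimal sketch of a closure for the iteration above (illustrative only; the
// doit() signature is assumed to match the AllocRecordClosure declaration in
// metaspace.hpp):
//
//   class PrintAllocRecordClosure : public Metaspace::AllocRecordClosure {
//    public:
//     virtual void doit(address ptr, MetaspaceObj::Type type, int byte_size) {
//       tty->print_cr(PTR_FORMAT " type=%d size=%d", p2i(ptr), (int)type, byte_size);
//     }
//   };
//
//   // usage while dumping:  space->iterate(&closure);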
3654
3655void Metaspace::purge(MetadataType mdtype) {
3656  get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
3657}
3658
3659void Metaspace::purge() {
3660  MutexLockerEx cl(SpaceManager::expand_lock(),
3661                   Mutex::_no_safepoint_check_flag);
3662  purge(NonClassType);
3663  if (using_class_space()) {
3664    purge(ClassType);
3665  }
3666}
3667
3668void Metaspace::print_on(outputStream* out) const {
3669  // Print both class virtual space counts and metaspace.
3670  if (Verbose) {
3671    vsm()->print_on(out);
3672    if (using_class_space()) {
3673      class_vsm()->print_on(out);
3674    }
3675  }
3676}
3677
3678bool Metaspace::contains(const void* ptr) {
3679  if (UseSharedSpaces && MetaspaceShared::is_in_shared_space(ptr)) {
3680    return true;
3681  }
3682
3683  if (using_class_space() && get_space_list(ClassType)->contains(ptr)) {
3684     return true;
3685  }
3686
3687  return get_space_list(NonClassType)->contains(ptr);
3688}
3689
3690void Metaspace::verify() {
3691  vsm()->verify();
3692  if (using_class_space()) {
3693    class_vsm()->verify();
3694  }
3695}
3696
3697void Metaspace::dump(outputStream* const out) const {
3698  out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, p2i(vsm()));
3699  vsm()->dump(out);
3700  if (using_class_space()) {
3701    out->print_cr("\nClass space manager: " INTPTR_FORMAT, p2i(class_vsm()));
3702    class_vsm()->dump(out);
3703  }
3704}
3705
3706/////////////// Unit tests ///////////////
3707
3708#ifndef PRODUCT
3709
3710class TestMetaspaceAuxTest : AllStatic {
3711 public:
3712  static void test_reserved() {
3713    size_t reserved = MetaspaceAux::reserved_bytes();
3714
3715    assert(reserved > 0, "assert");
3716
3717    size_t committed  = MetaspaceAux::committed_bytes();
3718    assert(committed <= reserved, "assert");
3719
3720    size_t reserved_metadata = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
3721    assert(reserved_metadata > 0, "assert");
3722    assert(reserved_metadata <= reserved, "assert");
3723
3724    if (UseCompressedClassPointers) {
3725      size_t reserved_class    = MetaspaceAux::reserved_bytes(Metaspace::ClassType);
3726      assert(reserved_class > 0, "assert");
3727      assert(reserved_class < reserved, "assert");
3728    }
3729  }
3730
3731  static void test_committed() {
3732    size_t committed = MetaspaceAux::committed_bytes();
3733
3734    assert(committed > 0, "assert");
3735
3736    size_t reserved  = MetaspaceAux::reserved_bytes();
3737    assert(committed <= reserved, "assert");
3738
3739    size_t committed_metadata = MetaspaceAux::committed_bytes(Metaspace::NonClassType);
3740    assert(committed_metadata > 0, "assert");
3741    assert(committed_metadata <= committed, "assert");
3742
3743    if (UseCompressedClassPointers) {
3744      size_t committed_class    = MetaspaceAux::committed_bytes(Metaspace::ClassType);
3745      assert(committed_class > 0, "assert");
3746      assert(committed_class < committed, "assert");
3747    }
3748  }
3749
3750  static void test_virtual_space_list_large_chunk() {
3751    VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
3752    MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3753    // Use a size larger than VirtualSpaceSize (256k) and add one page to make it _not_ be
3754    // vm_allocation_granularity aligned on Windows.
3755    size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
3756    large_size += (os::vm_page_size()/BytesPerWord);
3757    vs_list->get_new_chunk(large_size, large_size, 0);
3758  }
3759
3760  static void test() {
3761    test_reserved();
3762    test_committed();
3763    test_virtual_space_list_large_chunk();
3764  }
3765};
3766
3767void TestMetaspaceAux_test() {
3768  TestMetaspaceAuxTest::test();
3769}
3770
3771class TestVirtualSpaceNodeTest {
3772  static void chunk_up(size_t words_left, size_t& num_medium_chunks,
3773                                          size_t& num_small_chunks,
3774                                          size_t& num_specialized_chunks) {
3775    num_medium_chunks = words_left / MediumChunk;
3776    words_left = words_left % MediumChunk;
3777
3778    num_small_chunks = words_left / SmallChunk;
3779    words_left = words_left % SmallChunk;
3780    // how many specialized chunks can we get?
3781    num_specialized_chunks = words_left / SpecializedChunk;
3782    assert(words_left % SpecializedChunk == 0, "should be nothing left");
3783  }
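  // Worked example (illustrative only): for words_left == 8832 with the sizes
  // used in this file (MediumChunk 8K, SmallChunk 512, SpecializedChunk 128):
  //   8832 / 8192 = 1 medium chunk,      remainder 640
  //    640 /  512 = 1 small chunk,       remainder 128
  //    128 /  128 = 1 specialized chunk, remainder 0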
3784
3785 public:
3786  static void test() {
3787    MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3788    const size_t vsn_test_size_words = MediumChunk  * 4;
3789    const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord;
3790
3791    // The chunk sizes must be multiples of each other, or this will fail
3792    STATIC_ASSERT(MediumChunk % SmallChunk == 0);
3793    STATIC_ASSERT(SmallChunk % SpecializedChunk == 0);
3794
3795    { // No committed memory in VSN
3796      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3797      VirtualSpaceNode vsn(vsn_test_size_bytes);
3798      vsn.initialize();
3799      vsn.retire(&cm);
3800      assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN");
3801    }
3802
3803    { // All of VSN is committed, half is used by chunks
3804      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3805      VirtualSpaceNode vsn(vsn_test_size_bytes);
3806      vsn.initialize();
3807      vsn.expand_by(vsn_test_size_words, vsn_test_size_words);
3808      vsn.get_chunk_vs(MediumChunk);
3809      vsn.get_chunk_vs(MediumChunk);
3810      vsn.retire(&cm);
3811      assert(cm.sum_free_chunks_count() == 2, "should have been memory left for 2 medium chunks");
3812      assert(cm.sum_free_chunks() == 2*MediumChunk, "sizes should add up");
3813    }
3814
3815    const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord;
3816    // This doesn't work for systems with vm_page_size >= 16K.
3817    if (page_chunks < MediumChunk) {
3818      // 4 pages of VSN is committed, some is used by chunks
3819      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3820      VirtualSpaceNode vsn(vsn_test_size_bytes);
3821
3822      vsn.initialize();
3823      vsn.expand_by(page_chunks, page_chunks);
3824      vsn.get_chunk_vs(SmallChunk);
3825      vsn.get_chunk_vs(SpecializedChunk);
3826      vsn.retire(&cm);
3827
3828      // committed - used = words left to retire
3829      const size_t words_left = page_chunks - SmallChunk - SpecializedChunk;
3830
3831      size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
3832      chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
3833
3834      assert(num_medium_chunks == 0, "should not get any medium chunks");
3835      assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
3836      assert(cm.sum_free_chunks() == words_left, "sizes should add up");
3837    }
3838
3839    { // Half of VSN is committed, a humongous chunk is used
3840      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3841      VirtualSpaceNode vsn(vsn_test_size_bytes);
3842      vsn.initialize();
3843      vsn.expand_by(MediumChunk * 2, MediumChunk * 2);
3844      vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk
3845      vsn.retire(&cm);
3846
3847      const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk);
3848      size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
3849      chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
3850
3851      assert(num_medium_chunks == 0, "should not get any medium chunks");
3852      assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
3853      assert(cm.sum_free_chunks() == words_left, "sizes should add up");
3854    }
3855
3856  }
3857
3858#define assert_is_available_positive(word_size) \
3859  assert(vsn.is_available(word_size), \
3860         #word_size ": " PTR_FORMAT " bytes were not available in " \
3861         "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
3862         (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));
3863
3864#define assert_is_available_negative(word_size) \
3865  assert(!vsn.is_available(word_size), \
3866         #word_size ": " PTR_FORMAT " bytes should not be available in " \
3867         "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
3868         (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));
3869
3870  static void test_is_available_positive() {
3871    // Reserve some memory.
3872    VirtualSpaceNode vsn(os::vm_allocation_granularity());
3873    assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
3874
3875    // Commit some memory.
3876    size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
3877    bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
3878    assert(expanded, "Failed to commit");
3879
3880    // Check that is_available accepts the committed size.
3881    assert_is_available_positive(commit_word_size);
3882
3883    // Check that is_available accepts half the committed size.
3884    size_t expand_word_size = commit_word_size / 2;
3885    assert_is_available_positive(expand_word_size);
3886  }
3887
3888  static void test_is_available_negative() {
3889    // Reserve some memory.
3890    VirtualSpaceNode vsn(os::vm_allocation_granularity());
3891    assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
3892
3893    // Commit some memory.
3894    size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
3895    bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
3896    assert(expanded, "Failed to commit");
3897
3898    // Check that is_available doesn't accept a too large size.
3899    size_t two_times_commit_word_size = commit_word_size * 2;
3900    assert_is_available_negative(two_times_commit_word_size);
3901  }
3902
3903  static void test_is_available_overflow() {
3904    // Reserve some memory.
3905    VirtualSpaceNode vsn(os::vm_allocation_granularity());
3906    assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
3907
3908    // Commit some memory.
3909    size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
3910    bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
3911    assert(expanded, "Failed to commit");
3912
3913    // Calculate a size that will overflow the virtual space size.
3914    void* virtual_space_max = (void*)(uintptr_t)-1;
3915    size_t bottom_to_max = pointer_delta(virtual_space_max, vsn.bottom(), 1);
3916    size_t overflow_size = bottom_to_max + BytesPerWord;
3917    size_t overflow_word_size = overflow_size / BytesPerWord;
3918
3919    // Check that is_available can handle the overflow.
3920    assert_is_available_negative(overflow_word_size);
3921  }
3922
3923  static void test_is_available() {
3924    TestVirtualSpaceNodeTest::test_is_available_positive();
3925    TestVirtualSpaceNodeTest::test_is_available_negative();
3926    TestVirtualSpaceNodeTest::test_is_available_overflow();
3927  }
3928};
3929
3930void TestVirtualSpaceNode_test() {
3931  TestVirtualSpaceNodeTest::test();
3932  TestVirtualSpaceNodeTest::test_is_available();
3933}
3934#endif
3935