// metaspace.cpp revision 8413:92457dfb91bd
/*
 * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/gcLocker.hpp"
#include "memory/allocation.hpp"
#include "memory/binaryTreeDictionary.hpp"
#include "memory/filemap.hpp"
#include "memory/freeList.hpp"
#include "memory/metachunk.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceGCThresholdUpdater.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/metaspaceTracer.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/mutex.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "services/memTracker.hpp"
#include "services/memoryService.hpp"
#include "utilities/copy.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary;
typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;

// Set this constant to enable slow integrity checking of the free chunk lists
const bool metaspace_slow_verify = false;

size_t const allocation_from_dictionary_limit = 4 * K;

MetaWord* last_allocated = 0;

size_t Metaspace::_compressed_class_space_size;
const MetaspaceTracer* Metaspace::_tracer = NULL;

// Used in declarations in SpaceManager and ChunkManager
enum ChunkIndex {
  ZeroIndex = 0,
  SpecializedIndex = ZeroIndex,
  SmallIndex = SpecializedIndex + 1,
  MediumIndex = SmallIndex + 1,
  HumongousIndex = MediumIndex + 1,
  NumberOfFreeLists = 3,
  NumberOfInUseLists = 4
};

enum ChunkSizes {    // in words.
  ClassSpecializedChunk = 128,
  SpecializedChunk = 128,
  ClassSmallChunk = 256,
  SmallChunk = 512,
  ClassMediumChunk = 4 * K,
  MediumChunk = 8 * K
};

static ChunkIndex next_chunk_index(ChunkIndex i) {
  assert(i < NumberOfInUseLists, "Out of bounds");
  return (ChunkIndex) (i+1);
}

volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
uint MetaspaceGC::_shrink_factor = 0;
bool MetaspaceGC::_should_concurrent_collect = false;

typedef class FreeList<Metachunk> ChunkList;

// Manages the global free lists of chunks.
class ChunkManager : public CHeapObj<mtInternal> {
  friend class TestVirtualSpaceNodeTest;

  // Free list of chunks of different sizes.
  //   SpecializedChunk
  //   SmallChunk
  //   MediumChunk
  //   HumongousChunk
  ChunkList _free_chunks[NumberOfFreeLists];

  //   HumongousChunk
  ChunkTreeDictionary _humongous_dictionary;

  // Totals for the free chunks in all lists of this ChunkManager.
  size_t _free_chunks_total;
  size_t _free_chunks_count;

  void dec_free_chunks_total(size_t v) {
    assert(_free_chunks_count > 0 &&
             _free_chunks_total > 0,
             "About to go negative");
    Atomic::add_ptr(-1, &_free_chunks_count);
    jlong minus_v = (jlong) - (jlong) v;
    Atomic::add_ptr(minus_v, &_free_chunks_total);
  }

  // Debug support

  size_t sum_free_chunks();
  size_t sum_free_chunks_count();

  void locked_verify_free_chunks_total();
  void slow_locked_verify_free_chunks_total() {
    if (metaspace_slow_verify) {
      locked_verify_free_chunks_total();
    }
  }
  void locked_verify_free_chunks_count();
  void slow_locked_verify_free_chunks_count() {
    if (metaspace_slow_verify) {
      locked_verify_free_chunks_count();
    }
  }
  void verify_free_chunks_count();

 public:

  ChunkManager(size_t specialized_size, size_t small_size, size_t medium_size)
      : _free_chunks_total(0), _free_chunks_count(0) {
    _free_chunks[SpecializedIndex].set_size(specialized_size);
    _free_chunks[SmallIndex].set_size(small_size);
    _free_chunks[MediumIndex].set_size(medium_size);
  }

  // add or delete (return) a chunk to the global freelist.
  Metachunk* chunk_freelist_allocate(size_t word_size);

  // Map a size to a list index assuming that there are lists
  // for special, small, medium, and humongous chunks.
  static ChunkIndex list_index(size_t size);

  // Remove the chunk from its freelist.  It is
  // expected to be on one of the _free_chunks[] lists.
  void remove_chunk(Metachunk* chunk);

  // Add the simple linked list of chunks to the freelist of chunks
  // of type index.
  void return_chunks(ChunkIndex index, Metachunk* chunks);

  // Total of the space in the free chunks list
  size_t free_chunks_total_words();
  size_t free_chunks_total_bytes();

  // Number of chunks in the free chunks list
  size_t free_chunks_count();

  void inc_free_chunks_total(size_t v, size_t count = 1) {
    Atomic::add_ptr(count, &_free_chunks_count);
    Atomic::add_ptr(v, &_free_chunks_total);
  }
  ChunkTreeDictionary* humongous_dictionary() {
    return &_humongous_dictionary;
  }

  ChunkList* free_chunks(ChunkIndex index);

  // Returns the list for the given chunk word size.
  ChunkList* find_free_chunks_list(size_t word_size);

  // Remove from a list by size.  Selects list based on size of chunk.
  Metachunk* free_chunks_get(size_t chunk_word_size);

#define index_bounds_check(index)                                         \
  assert(index == SpecializedIndex ||                                     \
         index == SmallIndex ||                                           \
         index == MediumIndex ||                                          \
         index == HumongousIndex, err_msg("Bad index: %d", (int) index))

  size_t num_free_chunks(ChunkIndex index) const {
    index_bounds_check(index);

    if (index == HumongousIndex) {
      return _humongous_dictionary.total_free_blocks();
    }

    ssize_t count = _free_chunks[index].count();
    return count == -1 ? 0 : (size_t) count;
  }

  size_t size_free_chunks_in_bytes(ChunkIndex index) const {
    index_bounds_check(index);

    size_t word_size = 0;
    if (index == HumongousIndex) {
      word_size = _humongous_dictionary.total_size();
    } else {
      const size_t size_per_chunk_in_words = _free_chunks[index].size();
      word_size = size_per_chunk_in_words * num_free_chunks(index);
    }

    return word_size * BytesPerWord;
  }

  MetaspaceChunkFreeListSummary chunk_free_list_summary() const {
    return MetaspaceChunkFreeListSummary(num_free_chunks(SpecializedIndex),
                                         num_free_chunks(SmallIndex),
                                         num_free_chunks(MediumIndex),
                                         num_free_chunks(HumongousIndex),
                                         size_free_chunks_in_bytes(SpecializedIndex),
                                         size_free_chunks_in_bytes(SmallIndex),
                                         size_free_chunks_in_bytes(MediumIndex),
                                         size_free_chunks_in_bytes(HumongousIndex));
  }

  // Debug support
  void verify();
  void slow_verify() {
    if (metaspace_slow_verify) {
      verify();
    }
  }
  void locked_verify();
  void slow_locked_verify() {
    if (metaspace_slow_verify) {
      locked_verify();
    }
  }
  void verify_free_chunks_total();

  void locked_print_free_chunks(outputStream* st);
  void locked_print_sum_free_chunks(outputStream* st);

  void print_on(outputStream* st) const;
};

// Used to manage the free list of Metablocks (a block corresponds
// to the allocation of a quantum of metadata).
class BlockFreelist VALUE_OBJ_CLASS_SPEC {
  BlockTreeDictionary* const _dictionary;

  // Only allocate and split from freelist if the size of the allocation
  // is at least 1/4th the size of the available block.
  const static int WasteMultiplier = 4;

  // Accessors
  BlockTreeDictionary* dictionary() const { return _dictionary; }

 public:
  BlockFreelist();
  ~BlockFreelist();

  // Get and return a block to the free list
  MetaWord* get_block(size_t word_size);
  void return_block(MetaWord* p, size_t word_size);

  size_t total_size() { return dictionary()->total_size(); }

  void print_on(outputStream* st) const;
};

// A VirtualSpaceList node.
class VirtualSpaceNode : public CHeapObj<mtClass> {
  friend class VirtualSpaceList;

  // Link to next VirtualSpaceNode
  VirtualSpaceNode* _next;

  // total in the VirtualSpace
  MemRegion _reserved;
  ReservedSpace _rs;
  VirtualSpace _virtual_space;
  MetaWord* _top;
  // count of chunks contained in this VirtualSpace
  uintx _container_count;

  // Convenience functions to access the _virtual_space
  char* low()  const { return virtual_space()->low(); }
  char* high() const { return virtual_space()->high(); }

  // The first Metachunk will be allocated at the bottom of the
  // VirtualSpace
  Metachunk* first_chunk() { return (Metachunk*) bottom(); }

  // Committed but unused space in the virtual space
  size_t free_words_in_vs() const;
 public:

  VirtualSpaceNode(size_t byte_size);
  VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
  ~VirtualSpaceNode();

  // Convenience functions for logical bottom and end
  MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
  MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }

  bool contains(const void* ptr) { return ptr >= low() && ptr < high(); }

  size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
  size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }

  bool is_pre_committed() const { return _virtual_space.special(); }

  // address of next available space in _virtual_space;
  // Accessors
  VirtualSpaceNode* next() { return _next; }
  void set_next(VirtualSpaceNode* v) { _next = v; }

  void set_reserved(MemRegion const v) { _reserved = v; }
  void set_top(MetaWord* v) { _top = v; }

  // Accessors
  MemRegion* reserved() { return &_reserved; }
  VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }

  // Returns true if "word_size" is available in the VirtualSpace
  bool is_available(size_t word_size) { return word_size <= pointer_delta(end(), _top, sizeof(MetaWord)); }

  MetaWord* top() const { return _top; }
  void inc_top(size_t word_size) { _top += word_size; }

  uintx container_count() { return _container_count; }
  void inc_container_count();
  void dec_container_count();
#ifdef ASSERT
  uintx container_count_slow();
  void verify_container_count();
#endif

  // used and capacity in this single entry in the list
  size_t used_words_in_vs() const;
  size_t capacity_words_in_vs() const;

  bool initialize();

  // get space from the virtual space
  Metachunk* take_from_committed(size_t chunk_word_size);

  // Allocate a chunk from the virtual space and return it.
  Metachunk* get_chunk_vs(size_t chunk_word_size);

  // Expands/shrinks the committed space in a virtual space.  Delegates
  // to VirtualSpace
  bool expand_by(size_t min_words, size_t preferred_words);

  // In preparation for deleting this node, remove all the chunks
  // in the node from any freelist.
  void purge(ChunkManager* chunk_manager);

  // If an allocation doesn't fit in the current node a new node is created.
  // Allocate chunks out of the remaining committed space in this node
  // to avoid wasting that memory.
  // This always adds up because all the chunk sizes are multiples of
  // the smallest chunk size.
  void retire(ChunkManager* chunk_manager);

#ifdef ASSERT
  // Debug support
  void mangle();
#endif

  void print_on(outputStream* st) const;
};

#define assert_is_ptr_aligned(ptr, alignment) \
  assert(is_ptr_aligned(ptr, alignment),      \
    err_msg(PTR_FORMAT " is not aligned to "  \
      SIZE_FORMAT, p2i(ptr), alignment))

#define assert_is_size_aligned(size, alignment) \
  assert(is_size_aligned(size, alignment),      \
    err_msg(SIZE_FORMAT " is not aligned to "   \
       SIZE_FORMAT, size, alignment))


// Decide if large pages should be committed when the memory is reserved.
static bool should_commit_large_pages_when_reserving(size_t bytes) {
  if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
    size_t words = bytes / BytesPerWord;
    bool is_class = false; // We never reserve large pages for the class space.
    if (MetaspaceGC::can_expand(words, is_class) &&
        MetaspaceGC::allowed_expansion() >= words) {
      return true;
    }
  }

  return false;
}

// bytes is the size of the associated VirtualSpace.
VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
  assert_is_size_aligned(bytes, Metaspace::reserve_alignment());

#if INCLUDE_CDS
  // This allocates memory with mmap.  For DumpSharedSpaces, try to reserve
  // a configurable address, generally at the top of the Java heap, so other
  // memory addresses don't conflict.
  if (DumpSharedSpaces) {
    bool large_pages = false; // No large pages when dumping the CDS archive.
    char* shared_base = (char*)align_ptr_up((char*)SharedBaseAddress, Metaspace::reserve_alignment());

    _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base);
    if (_rs.is_reserved()) {
      assert(shared_base == 0 || _rs.base() == shared_base, "should match");
    } else {
      // Get a mmap region anywhere if the SharedBaseAddress fails.
      _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
    }
    MetaspaceShared::set_shared_rs(&_rs);
  } else
#endif
  {
    bool large_pages = should_commit_large_pages_when_reserving(bytes);

    _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
  }

  if (_rs.is_reserved()) {
    assert(_rs.base() != NULL, "Catch if we get a NULL address");
    assert(_rs.size() != 0, "Catch if we get a 0 size");
    assert_is_ptr_aligned(_rs.base(), Metaspace::reserve_alignment());
    assert_is_size_aligned(_rs.size(), Metaspace::reserve_alignment());

    MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
  }
}

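// All chunks in this node are expected to be on the ChunkManager's free lists
// already; walk them from bottom() to top() and unlink each one so the node
// can be deleted without leaving stale free-list entries behind.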
void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk ) {
    assert(chunk->is_tagged_free(), "Should be tagged free");
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    chunk_manager->remove_chunk(chunk);
    assert(chunk->next() == NULL &&
           chunk->prev() == NULL,
           "Was not removed from its list");
    chunk = (Metachunk*) next;
  }
}

#ifdef ASSERT
uintx VirtualSpaceNode::container_count_slow() {
  uintx count = 0;
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk ) {
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    // Don't count the chunks on the free lists.  Those are
    // still part of the VirtualSpaceNode but not currently
    // counted.
    if (!chunk->is_tagged_free()) {
      count++;
    }
    chunk = (Metachunk*) next;
  }
  return count;
}
#endif

// List of VirtualSpaces for metadata allocation.
class VirtualSpaceList : public CHeapObj<mtClass> {
  friend class VirtualSpaceNode;

  enum VirtualSpaceSizes {
    VirtualSpaceSize = 256 * K
  };

  // Head of the list
  VirtualSpaceNode* _virtual_space_list;
  // virtual space currently being used for allocations
  VirtualSpaceNode* _current_virtual_space;

  // Is this VirtualSpaceList used for the compressed class space
  bool _is_class;

  // Sum of reserved and committed memory in the virtual spaces
  size_t _reserved_words;
  size_t _committed_words;

  // Number of virtual spaces
  size_t _virtual_space_count;

  ~VirtualSpaceList();

  VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }

  void set_virtual_space_list(VirtualSpaceNode* v) {
    _virtual_space_list = v;
  }
  void set_current_virtual_space(VirtualSpaceNode* v) {
    _current_virtual_space = v;
  }

  void link_vs(VirtualSpaceNode* new_entry);

  // Get another virtual space and add it to the list.  This
  // is typically prompted by a failed attempt to allocate a chunk
  // and is typically followed by the allocation of a chunk.
  bool create_new_virtual_space(size_t vs_word_size);

  // Chunk up the unused committed space in the current
  // virtual space and add the chunks to the free list.
  void retire_current_virtual_space();

 public:
  VirtualSpaceList(size_t word_size);
  VirtualSpaceList(ReservedSpace rs);

  size_t free_bytes();

  Metachunk* get_new_chunk(size_t word_size,
                           size_t grow_chunks_by_words,
                           size_t medium_chunk_bunch);

  bool expand_node_by(VirtualSpaceNode* node,
                      size_t min_words,
                      size_t preferred_words);

  bool expand_by(size_t min_words,
                 size_t preferred_words);

  VirtualSpaceNode* current_virtual_space() {
    return _current_virtual_space;
  }

  bool is_class() const { return _is_class; }

  bool initialization_succeeded() { return _virtual_space_list != NULL; }

  size_t reserved_words()  { return _reserved_words; }
  size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
  size_t committed_words() { return _committed_words; }
  size_t committed_bytes() { return committed_words() * BytesPerWord; }

  void inc_reserved_words(size_t v);
  void dec_reserved_words(size_t v);
  void inc_committed_words(size_t v);
  void dec_committed_words(size_t v);
  void inc_virtual_space_count();
  void dec_virtual_space_count();

  bool contains(const void* ptr);

  // Unlink empty VirtualSpaceNodes and free them.
  void purge(ChunkManager* chunk_manager);

  void print_on(outputStream* st) const;

  class VirtualSpaceListIterator : public StackObj {
    VirtualSpaceNode* _virtual_spaces;
   public:
    VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
      _virtual_spaces(virtual_spaces) {}

    bool repeat() {
      return _virtual_spaces != NULL;
    }

    VirtualSpaceNode* get_next() {
      VirtualSpaceNode* result = _virtual_spaces;
      if (_virtual_spaces != NULL) {
        _virtual_spaces = _virtual_spaces->next();
      }
      return result;
    }
  };
};

class Metadebug : AllStatic {
  // Debugging support for Metaspaces
  static int _allocation_fail_alot_count;

 public:

  static void init_allocation_fail_alot_count();
#ifdef ASSERT
  static bool test_metadata_failure();
#endif
};

int Metadebug::_allocation_fail_alot_count = 0;

//  SpaceManager - used by Metaspace to handle allocations
class SpaceManager : public CHeapObj<mtClass> {
  friend class Metaspace;
  friend class Metadebug;

 private:

  // protects allocations
  Mutex* const _lock;

  // Type of metadata allocated.
  Metaspace::MetadataType _mdtype;

  // List of chunks in use by this SpaceManager.  Allocations
  // are done from the current chunk.  The list is used for deallocating
  // chunks when the SpaceManager is freed.
  Metachunk* _chunks_in_use[NumberOfInUseLists];
  Metachunk* _current_chunk;

  // Number of small chunks to allocate to a manager
  // If class space manager, small chunks are unlimited
  static uint const _small_chunk_limit;

  // Sum of all space in allocated chunks
  size_t _allocated_blocks_words;

  // Sum of all allocated chunks
  size_t _allocated_chunks_words;
  size_t _allocated_chunks_count;

  // Free lists of blocks are per SpaceManager since they
  // are assumed to be in chunks in use by the SpaceManager
  // and all chunks in use by a SpaceManager are freed when
  // the class loader using the SpaceManager is collected.
  BlockFreelist _block_freelists;

  // protects virtualspace and chunk expansions
  static const char*  _expand_lock_name;
  static const int    _expand_lock_rank;
  static Mutex* const _expand_lock;

 private:
  // Accessors
  Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
  void set_chunks_in_use(ChunkIndex index, Metachunk* v) {
    _chunks_in_use[index] = v;
  }

  BlockFreelist* block_freelists() const {
    return (BlockFreelist*) &_block_freelists;
  }

  Metaspace::MetadataType mdtype() { return _mdtype; }

  VirtualSpaceList* vs_list()   const { return Metaspace::get_space_list(_mdtype); }
  ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }

  Metachunk* current_chunk() const { return _current_chunk; }
  void set_current_chunk(Metachunk* v) {
    _current_chunk = v;
  }

  Metachunk* find_current_chunk(size_t word_size);

  // Add chunk to the list of chunks in use
  void add_chunk(Metachunk* v, bool make_current);
  void retire_current_chunk();

  Mutex* lock() const { return _lock; }

  const char* chunk_size_name(ChunkIndex index) const;

 protected:
  void initialize();

 public:
  SpaceManager(Metaspace::MetadataType mdtype,
               Mutex* lock);
  ~SpaceManager();

  enum ChunkMultiples {
    MediumChunkMultiple = 4
  };

  bool is_class() { return _mdtype == Metaspace::ClassType; }

  // Accessors
  size_t specialized_chunk_size() { return (size_t) (is_class() ? ClassSpecializedChunk : SpecializedChunk); }
  size_t small_chunk_size()       { return (size_t) (is_class() ? ClassSmallChunk : SmallChunk); }
  size_t medium_chunk_size()      { return (size_t) (is_class() ? ClassMediumChunk : MediumChunk); }
  size_t medium_chunk_bunch()     { return medium_chunk_size() * MediumChunkMultiple; }

  size_t smallest_chunk_size()  { return specialized_chunk_size(); }

  size_t allocated_blocks_words() const { return _allocated_blocks_words; }
  size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
  size_t allocated_chunks_words() const { return _allocated_chunks_words; }
  size_t allocated_chunks_bytes() const { return _allocated_chunks_words * BytesPerWord; }
  size_t allocated_chunks_count() const { return _allocated_chunks_count; }

  bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }

  static Mutex* expand_lock() { return _expand_lock; }

  // Increment the per Metaspace and global running sums for Metachunks
  // by the given size.  This is used when a Metachunk is added to
  // the in-use list.
  void inc_size_metrics(size_t words);
  // Increment the per Metaspace and global running sums for Metablocks by the given
  // size.  This is used when a Metablock is allocated.
  void inc_used_metrics(size_t words);
  // Delete the portion of the running sums for this SpaceManager. That is,
  // the global running sums for the Metachunks and Metablocks are
  // decremented for all the Metachunks in-use by this SpaceManager.
  void dec_total_from_size_metrics();

  // Set the sizes for the initial chunks.
  void get_initial_chunk_sizes(Metaspace::MetaspaceType type,
                               size_t* chunk_word_size,
                               size_t* class_chunk_word_size);

  size_t sum_capacity_in_chunks_in_use() const;
  size_t sum_used_in_chunks_in_use() const;
  size_t sum_free_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use(ChunkIndex index ) const;

  size_t sum_count_in_chunks_in_use();
  size_t sum_count_in_chunks_in_use(ChunkIndex i);

  Metachunk* get_new_chunk(size_t word_size, size_t grow_chunks_by_words);

  // Block allocation and deallocation.
  // Allocates a block from the current chunk
  MetaWord* allocate(size_t word_size);

  // Helper for allocations
  MetaWord* allocate_work(size_t word_size);

  // Returns a block to the per manager freelist
  void deallocate(MetaWord* p, size_t word_size);

  // Based on the allocation size and a minimum chunk size,
  // returns the chunk size to use (for expanding space for chunk allocation).
  size_t calc_chunk_size(size_t allocation_word_size);

  // Called when an allocation from the current chunk fails.
  // Gets a new chunk (may require getting a new virtual space),
  // and allocates from that chunk.
  MetaWord* grow_and_allocate(size_t word_size);

  // Notify memory usage to MemoryService.
  void track_metaspace_memory_usage();

  // debugging support.

  void dump(outputStream* const out) const;
  void print_on(outputStream* st) const;
  void locked_print_chunks_in_use_on(outputStream* st) const;

  void verify();
  void verify_chunk_size(Metachunk* chunk);
  NOT_PRODUCT(void mangle_freed_chunks();)
#ifdef ASSERT
  void verify_allocated_blocks_words();
#endif

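  // Round a requested allocation up to the raw size it actually consumes from
  // a chunk: at least sizeof(Metablock) so a freed block can later be linked
  // into the block freelist, aligned to Metachunk::object_alignment().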
  size_t get_raw_word_size(size_t word_size) {
    size_t byte_size = word_size * BytesPerWord;

    size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
    raw_bytes_size = align_size_up(raw_bytes_size, Metachunk::object_alignment());

    size_t raw_word_size = raw_bytes_size / BytesPerWord;
    assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");

    return raw_word_size;
  }
};

uint const SpaceManager::_small_chunk_limit = 4;

const char* SpaceManager::_expand_lock_name =
  "SpaceManager chunk allocation lock";
const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
Mutex* const SpaceManager::_expand_lock =
  new Mutex(SpaceManager::_expand_lock_rank,
            SpaceManager::_expand_lock_name,
            Mutex::_allow_vm_block_flag,
            Monitor::_safepoint_check_never);

void VirtualSpaceNode::inc_container_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _container_count++;
  DEBUG_ONLY(verify_container_count();)
}

void VirtualSpaceNode::dec_container_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _container_count--;
}

#ifdef ASSERT
void VirtualSpaceNode::verify_container_count() {
  assert(_container_count == container_count_slow(),
    err_msg("Inconsistency in container_count _container_count " UINTX_FORMAT
            " container_count_slow() " UINTX_FORMAT, _container_count, container_count_slow()));
}
#endif

// BlockFreelist methods

BlockFreelist::BlockFreelist() : _dictionary(new BlockTreeDictionary()) {}

BlockFreelist::~BlockFreelist() {
  if (Verbose && TraceMetadataChunkAllocation) {
    dictionary()->print_free_lists(gclog_or_tty);
  }
  delete _dictionary;
}

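// Wrap the freed memory in a Metablock constructed in place at p and hand it
// to the dictionary for later reuse.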
void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
  Metablock* free_chunk = ::new (p) Metablock(word_size);
  dictionary()->return_chunk(free_chunk);
}

MetaWord* BlockFreelist::get_block(size_t word_size) {
  if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
    // Dark matter.  Too small for dictionary.
    return NULL;
  }

  Metablock* free_block =
    dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::atLeast);
  if (free_block == NULL) {
    return NULL;
  }

  const size_t block_size = free_block->size();
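  // Per WasteMultiplier, only use the block if the request is at least 1/4 of
  // its size; otherwise put it back and let the caller allocate fresh space
  // from a chunk instead of wasting most of a large free block.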
  if (block_size > WasteMultiplier * word_size) {
    return_block((MetaWord*)free_block, block_size);
    return NULL;
  }

  MetaWord* new_block = (MetaWord*)free_block;
  assert(block_size >= word_size, "Incorrect size of block from freelist");
  const size_t unused = block_size - word_size;
  if (unused >= TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
    return_block(new_block + word_size, unused);
  }

  return new_block;
}

void BlockFreelist::print_on(outputStream* st) const {
  dictionary()->print_free_lists(st);
}

// VirtualSpaceNode methods

VirtualSpaceNode::~VirtualSpaceNode() {
  _rs.release();
#ifdef ASSERT
  size_t word_size = sizeof(*this) / BytesPerWord;
  Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
#endif
}

size_t VirtualSpaceNode::used_words_in_vs() const {
  return pointer_delta(top(), bottom(), sizeof(MetaWord));
}

// Space committed in the VirtualSpace
size_t VirtualSpaceNode::capacity_words_in_vs() const {
  return pointer_delta(end(), bottom(), sizeof(MetaWord));
}

size_t VirtualSpaceNode::free_words_in_vs() const {
  return pointer_delta(end(), top(), sizeof(MetaWord));
}

// Allocates the chunk from the virtual space only.
// This interface is also used internally for debugging.  Not all
// chunks removed here are necessarily used for allocation.
Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
  // Bottom of the new chunk
  MetaWord* chunk_limit = top();
  assert(chunk_limit != NULL, "Not safe to call this method");

  // The virtual spaces are always expanded by the
  // commit granularity to enforce the following condition.
  // Without this the is_available check will not work correctly.
  assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
      "The committed memory doesn't match the expanded memory.");

  if (!is_available(chunk_word_size)) {
    if (TraceMetadataChunkAllocation) {
      gclog_or_tty->print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
      // Dump some information about the virtual space that is nearly full
      print_on(gclog_or_tty);
    }
    return NULL;
  }

  // Take the space (bump top on the current virtual space).
  inc_top(chunk_word_size);

  // Initialize the chunk
  Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this);
  return result;
}


// Expand the virtual space (commit more of the reserved space)
bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
  size_t min_bytes = min_words * BytesPerWord;
  size_t preferred_bytes = preferred_words * BytesPerWord;

  size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();

  if (uncommitted < min_bytes) {
    return false;
  }

  size_t commit = MIN2(preferred_bytes, uncommitted);
  bool result = virtual_space()->expand_by(commit, false);

  assert(result, "Failed to commit memory");

  return result;
}

Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());
  Metachunk* result = take_from_committed(chunk_word_size);
  if (result != NULL) {
    inc_container_count();
  }
  return result;
}

bool VirtualSpaceNode::initialize() {

  if (!_rs.is_reserved()) {
    return false;
  }

  // These are necessary restrictions to make sure that the virtual space always
  // grows in steps of Metaspace::commit_alignment(). If both base and size are
  // aligned only the middle alignment of the VirtualSpace is used.
  assert_is_ptr_aligned(_rs.base(), Metaspace::commit_alignment());
  assert_is_size_aligned(_rs.size(), Metaspace::commit_alignment());

  // ReservedSpaces marked as special will have the entire memory
  // pre-committed. Setting a committed size will make sure that
  // committed_size and actual_committed_size agree.
  size_t pre_committed_size = _rs.special() ? _rs.size() : 0;

  bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
                                            Metaspace::commit_alignment());
  if (result) {
    assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
        "Checking that the pre-committed memory was registered by the VirtualSpace");

    set_top((MetaWord*)virtual_space()->low());
    set_reserved(MemRegion((HeapWord*)_rs.base(),
                 (HeapWord*)(_rs.base() + _rs.size())));

    assert(reserved()->start() == (HeapWord*) _rs.base(),
      err_msg("Reserved start was not set properly " PTR_FORMAT
        " != " PTR_FORMAT, p2i(reserved()->start()), p2i(_rs.base())));
    assert(reserved()->word_size() == _rs.size() / BytesPerWord,
      err_msg("Reserved size was not set properly " SIZE_FORMAT
        " != " SIZE_FORMAT, reserved()->word_size(),
        _rs.size() / BytesPerWord));
  }

  return result;
}

void VirtualSpaceNode::print_on(outputStream* st) const {
  size_t used = used_words_in_vs();
  size_t capacity = capacity_words_in_vs();
  VirtualSpace* vs = virtual_space();
  st->print_cr("   space @ " PTR_FORMAT " " SIZE_FORMAT "K, " SIZE_FORMAT_W(3) "%% used "
           "[" PTR_FORMAT ", " PTR_FORMAT ", "
           PTR_FORMAT ", " PTR_FORMAT ")",
           p2i(vs), capacity / K,
           capacity == 0 ? 0 : used * 100 / capacity,
           p2i(bottom()), p2i(top()), p2i(end()),
           p2i(vs->high_boundary()));
}

#ifdef ASSERT
void VirtualSpaceNode::mangle() {
  size_t word_size = capacity_words_in_vs();
  Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
}
#endif // ASSERT

// VirtualSpaceList methods
// Space allocated from the VirtualSpace

VirtualSpaceList::~VirtualSpaceList() {
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsl = iter.get_next();
    delete vsl;
  }
}

void VirtualSpaceList::inc_reserved_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _reserved_words = _reserved_words + v;
}
void VirtualSpaceList::dec_reserved_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _reserved_words = _reserved_words - v;
}

#define assert_committed_below_limit()                             \
  assert(MetaspaceAux::committed_bytes() <= MaxMetaspaceSize,      \
      err_msg("Too much committed memory. Committed: " SIZE_FORMAT \
              " limit (MaxMetaspaceSize): " SIZE_FORMAT,           \
          MetaspaceAux::committed_bytes(), MaxMetaspaceSize));

void VirtualSpaceList::inc_committed_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _committed_words = _committed_words + v;

  assert_committed_below_limit();
}
void VirtualSpaceList::dec_committed_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _committed_words = _committed_words - v;

  assert_committed_below_limit();
}

void VirtualSpaceList::inc_virtual_space_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _virtual_space_count++;
}
void VirtualSpaceList::dec_virtual_space_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _virtual_space_count--;
}

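// Unlink the chunk from whichever free list (or the humongous dictionary) it
// is currently on and update the free-chunk totals.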
void ChunkManager::remove_chunk(Metachunk* chunk) {
  size_t word_size = chunk->word_size();
  ChunkIndex index = list_index(word_size);
  if (index != HumongousIndex) {
    free_chunks(index)->remove_chunk(chunk);
  } else {
    humongous_dictionary()->remove_chunk(chunk);
  }

  // Chunk is being removed from the chunks free list.
  dec_free_chunks_total(chunk->word_size());
}

// Walk the list of VirtualSpaceNodes and delete
// nodes with a 0 container_count.  Remove Metachunks in
// the node from their respective freelists.
void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
  assert_lock_strong(SpaceManager::expand_lock());
  // Don't use a VirtualSpaceListIterator because this
  // list is being changed and a straightforward use of an iterator is not safe.
  VirtualSpaceNode* purged_vsl = NULL;
  VirtualSpaceNode* prev_vsl = virtual_space_list();
  VirtualSpaceNode* next_vsl = prev_vsl;
  while (next_vsl != NULL) {
    VirtualSpaceNode* vsl = next_vsl;
    next_vsl = vsl->next();
    // Don't free the current virtual space since it will likely
    // be needed soon.
    if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
      // Unlink it from the list
      if (prev_vsl == vsl) {
        // This is the case of the current node being the first node.
        assert(vsl == virtual_space_list(), "Expected to be the first node");
        set_virtual_space_list(vsl->next());
      } else {
        prev_vsl->set_next(vsl->next());
      }

      vsl->purge(chunk_manager);
      dec_reserved_words(vsl->reserved_words());
      dec_committed_words(vsl->committed_words());
      dec_virtual_space_count();
      purged_vsl = vsl;
      delete vsl;
    } else {
      prev_vsl = vsl;
    }
  }
#ifdef ASSERT
  if (purged_vsl != NULL) {
    // List should be stable enough to use an iterator here.
    VirtualSpaceListIterator iter(virtual_space_list());
    while (iter.repeat()) {
      VirtualSpaceNode* vsl = iter.get_next();
      assert(vsl != purged_vsl, "Purge of vsl failed");
    }
  }
#endif
}


// This function looks at the mmap regions in the metaspace without locking.
// The chunks are added with store ordering and not deleted except for at
// unloading time during a safepoint.
bool VirtualSpaceList::contains(const void* ptr) {
  // List should be stable enough to use an iterator here because removing virtual
  // space nodes is only allowed at a safepoint.
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsn = iter.get_next();
    if (vsn->contains(ptr)) {
      return true;
    }
  }
  return false;
}

void VirtualSpaceList::retire_current_virtual_space() {
  assert_lock_strong(SpaceManager::expand_lock());

  VirtualSpaceNode* vsn = current_virtual_space();

  ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
                                  Metaspace::chunk_manager_metadata();

  vsn->retire(cm);
}

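// Carve the remaining committed space in this node into free chunks, trying
// the largest (medium) chunk size first and working down to the smallest, and
// hand each chunk to the ChunkManager so the space is not wasted.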
void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
  for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
    ChunkIndex index = (ChunkIndex)i;
    size_t chunk_size = chunk_manager->free_chunks(index)->size();

    while (free_words_in_vs() >= chunk_size) {
      DEBUG_ONLY(verify_container_count();)
      Metachunk* chunk = get_chunk_vs(chunk_size);
      assert(chunk != NULL, "allocation should have been successful");

      chunk_manager->return_chunks(index, chunk);
      chunk_manager->inc_free_chunks_total(chunk_size);
      DEBUG_ONLY(verify_container_count();)
    }
  }
  assert(free_words_in_vs() == 0, "should be empty now");
}

VirtualSpaceList::VirtualSpaceList(size_t word_size) :
                                   _is_class(false),
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _reserved_words(0),
                                   _committed_words(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  create_new_virtual_space(word_size);
}

VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
                                   _is_class(true),
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _reserved_words(0),
                                   _committed_words(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
  bool succeeded = class_entry->initialize();
  if (succeeded) {
    link_vs(class_entry);
  }
}

size_t VirtualSpaceList::free_bytes() {
  return virtual_space_list()->free_words_in_vs() * BytesPerWord;
}

// Allocate another meta virtual space and add it to the list.
bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());

  if (is_class()) {
    assert(false, "We currently don't support more than one VirtualSpace for"
                  " the compressed class space. The initialization of the"
                  " CCS uses another code path and should not hit this path.");
    return false;
  }

  if (vs_word_size == 0) {
    assert(false, "vs_word_size should always be at least _reserve_alignment large.");
    return false;
  }

  // Reserve the space
  size_t vs_byte_size = vs_word_size * BytesPerWord;
  assert_is_size_aligned(vs_byte_size, Metaspace::reserve_alignment());

  // Allocate the meta virtual space and initialize it.
  VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
  if (!new_entry->initialize()) {
    delete new_entry;
    return false;
  } else {
    assert(new_entry->reserved_words() == vs_word_size,
        "Reserved memory size differs from requested memory size");
    // ensure lock-free iteration sees fully initialized node
    OrderAccess::storestore();
    link_vs(new_entry);
    return true;
  }
}

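// Append the new node to the list, make it the current allocation target and
// fold its reserved and committed sizes into the list-wide totals.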
void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
  if (virtual_space_list() == NULL) {
    set_virtual_space_list(new_entry);
  } else {
    current_virtual_space()->set_next(new_entry);
  }
  set_current_virtual_space(new_entry);
  inc_reserved_words(new_entry->reserved_words());
  inc_committed_words(new_entry->committed_words());
  inc_virtual_space_count();
#ifdef ASSERT
  new_entry->mangle();
#endif
  if (TraceMetavirtualspaceAllocation && Verbose) {
    VirtualSpaceNode* vsl = current_virtual_space();
    vsl->print_on(gclog_or_tty);
  }
}

bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
                                      size_t min_words,
                                      size_t preferred_words) {
  size_t before = node->committed_words();

  bool result = node->expand_by(min_words, preferred_words);

  size_t after = node->committed_words();

  // after and before can be the same if the memory was pre-committed.
  assert(after >= before, "Inconsistency");
  inc_committed_words(after - before);

  return result;
}

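// Try to commit at least min_words (preferably preferred_words), first by
// expanding the current virtual space and, failing that, by retiring it and
// creating a new virtual space to expand instead.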
bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
  assert_is_size_aligned(min_words,       Metaspace::commit_alignment_words());
  assert_is_size_aligned(preferred_words, Metaspace::commit_alignment_words());
  assert(min_words <= preferred_words, "Invalid arguments");

  if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
    return false;
  }

  size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
  if (allowed_expansion_words < min_words) {
    return false;
  }

  size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);

  // Commit more memory from the current virtual space.
  bool vs_expanded = expand_node_by(current_virtual_space(),
                                    min_words,
                                    max_expansion_words);
  if (vs_expanded) {
    return true;
  }
  retire_current_virtual_space();

  // Get another virtual space.
  size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
  grow_vs_words = align_size_up(grow_vs_words, Metaspace::reserve_alignment_words());

  if (create_new_virtual_space(grow_vs_words)) {
    if (current_virtual_space()->is_pre_committed()) {
      // The memory was pre-committed, so we are done here.
      assert(min_words <= current_virtual_space()->committed_words(),
          "The new VirtualSpace was pre-committed, so it"
          " should be large enough to fit the alloc request.");
      return true;
    }

    return expand_node_by(current_virtual_space(),
                          min_words,
                          max_expansion_words);
  }

  return false;
}

Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
                                           size_t grow_chunks_by_words,
                                           size_t medium_chunk_bunch) {

  // Allocate a chunk out of the current virtual space.
  Metachunk* next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);

  if (next != NULL) {
    return next;
  }

  // The expand amount is currently only determined by the requested sizes
  // and not how much committed memory is left in the current virtual space.

  size_t min_word_size       = align_size_up(grow_chunks_by_words, Metaspace::commit_alignment_words());
  size_t preferred_word_size = align_size_up(medium_chunk_bunch,   Metaspace::commit_alignment_words());
  if (min_word_size >= preferred_word_size) {
    // Can happen when humongous chunks are allocated.
    preferred_word_size = min_word_size;
  }

  bool expanded = expand_by(min_word_size, preferred_word_size);
  if (expanded) {
    next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
    assert(next != NULL, "The allocation was expected to succeed after the expansion");
  }

  return next;
}

void VirtualSpaceList::print_on(outputStream* st) const {
  if (TraceMetadataChunkAllocation && Verbose) {
    VirtualSpaceListIterator iter(virtual_space_list());
    while (iter.repeat()) {
      VirtualSpaceNode* node = iter.get_next();
      node->print_on(st);
    }
  }
}

// MetaspaceGC methods

// VM_CollectForMetadataAllocation is the vm operation used to GC.
// Within the VM operation after the GC the attempt to allocate the metadata
// should succeed.  If the GC did not free enough space for the metaspace
// allocation, the HWM is increased so that another virtualspace will be
// allocated for the metadata.  With perm gen the increase in the perm
// gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
// metaspace policy uses those as the small and large steps for the HWM.
//
// After the GC the compute_new_size() for MetaspaceGC is called to
// resize the capacity of the metaspaces.  The current implementation
// is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
// to resize the Java heap by some GC's.  New flags can be implemented
// if really needed.  MinMetaspaceFreeRatio is used to calculate how much
// free space is desirable in the metaspace capacity to decide how much
// to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
// free space is desirable in the metaspace capacity before decreasing
// the HWM.

// Calculate the amount to increase the high water mark (HWM).
// Increase by a minimum amount (MinMetaspaceExpansion) so that
// another expansion is not requested too soon.  If that is not
// enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
// If that is still not enough, expand by the size of the allocation
// plus some.
size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
  size_t min_delta = MinMetaspaceExpansion;
  size_t max_delta = MaxMetaspaceExpansion;
  size_t delta = align_size_up(bytes, Metaspace::commit_alignment());

  if (delta <= min_delta) {
    delta = min_delta;
  } else if (delta <= max_delta) {
    // Don't want to hit the high water mark on the next
    // allocation so make the delta greater than just enough
    // for this allocation.
    delta = max_delta;
  } else {
    // This allocation is large but the next ones are probably not
    // so increase by the minimum.
    delta = delta + min_delta;
  }

  assert_is_size_aligned(delta, Metaspace::commit_alignment());

  return delta;
}

size_t MetaspaceGC::capacity_until_GC() {
  size_t value = (size_t)OrderAccess::load_ptr_acquire(&_capacity_until_GC);
  assert(value >= MetaspaceSize, "Not initialized properly?");
  return value;
}

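// Try to raise the GC threshold by v using a single compare-and-swap.
// Returns false if another thread updated _capacity_until_GC concurrently
// (the CAS failed); on success the old and new values are reported back
// through the optional out parameters.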
bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
  assert_is_size_aligned(v, Metaspace::commit_alignment());

  size_t capacity_until_GC = (size_t) _capacity_until_GC;
  size_t new_value = capacity_until_GC + v;

  if (new_value < capacity_until_GC) {
    // The addition wrapped around, set new_value to aligned max value.
    new_value = align_size_down(max_uintx, Metaspace::commit_alignment());
  }

  intptr_t expected = (intptr_t) capacity_until_GC;
  intptr_t actual = Atomic::cmpxchg_ptr((intptr_t) new_value, &_capacity_until_GC, expected);

  if (expected != actual) {
    return false;
  }

  if (new_cap_until_GC != NULL) {
    *new_cap_until_GC = new_value;
  }
  if (old_cap_until_GC != NULL) {
    *old_cap_until_GC = capacity_until_GC;
  }
  return true;
}

size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
  assert_is_size_aligned(v, Metaspace::commit_alignment());

  return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC);
}

void MetaspaceGC::initialize() {
  // Set the high-water mark to MaxMetaspaceSize during VM initialization since
  // we can't do a GC during initialization.
  _capacity_until_GC = MaxMetaspaceSize;
}

void MetaspaceGC::post_initialize() {
  // Reset the high-water mark once the VM initialization is done.
  _capacity_until_GC = MAX2(MetaspaceAux::committed_bytes(), MetaspaceSize);
}

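// Decide whether committing word_size more words is permitted at all, i.e.
// whether it would exceed CompressedClassSpaceSize (for class metadata) or
// the user-imposed MaxMetaspaceSize.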
bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
  // Check if the compressed class space is full.
  if (is_class && Metaspace::using_class_space()) {
    size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
    if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
      return false;
    }
  }

  // Check if the user has imposed a limit on the metaspace memory.
  size_t committed_bytes = MetaspaceAux::committed_bytes();
  if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
    return false;
  }

  return true;
}

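// Returns the number of words that may still be committed before hitting
// either the GC threshold (capacity_until_GC) or MaxMetaspaceSize,
// whichever comes first.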
1458size_t MetaspaceGC::allowed_expansion() {
1459  size_t committed_bytes = MetaspaceAux::committed_bytes();
1460  size_t capacity_until_gc = capacity_until_GC();
1461
1462  assert(capacity_until_gc >= committed_bytes,
1463        err_msg("capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
1464                capacity_until_gc, committed_bytes));
1465
1466  size_t left_until_max  = MaxMetaspaceSize - committed_bytes;
1467  size_t left_until_GC = capacity_until_gc - committed_bytes;
1468  size_t left_to_commit = MIN2(left_until_GC, left_until_max);
1469
1470  return left_to_commit / BytesPerWord;
1471}
1472
1473void MetaspaceGC::compute_new_size() {
1474  assert(_shrink_factor <= 100, "invalid shrink factor");
1475  uint current_shrink_factor = _shrink_factor;
1476  _shrink_factor = 0;
1477
1478  // Using committed_bytes() for used_after_gc is an overestimation, since the
1479  // chunk free lists are included in committed_bytes() and the memory in an
1480  // un-fragmented chunk free list is available for future allocations.
1481  // However, if the chunk free lists becomes fragmented, then the memory may
1482  // not be available for future allocations and the memory is therefore "in use".
1483  // Including the chunk free lists in the definition of "in use" is therefore
1484  // necessary. Not including the chunk free lists can cause capacity_until_GC to
1485  // shrink below committed_bytes() and this has caused serious bugs in the past.
1486  const size_t used_after_gc = MetaspaceAux::committed_bytes();
1487  const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
1488
1489  const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
1490  const double maximum_used_percentage = 1.0 - minimum_free_percentage;
1491
1492  const double min_tmp = used_after_gc / maximum_used_percentage;
1493  size_t minimum_desired_capacity =
1494    (size_t)MIN2(min_tmp, double(max_uintx));
1495  // Don't shrink below the initial metaspace size (MetaspaceSize)
1496  minimum_desired_capacity = MAX2(minimum_desired_capacity,
1497                                  MetaspaceSize);
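  // Editor's note: illustrative arithmetic, not part of the original source.
  // With MinMetaspaceFreeRatio = 40 and used_after_gc = 30 MB:
  //   maximum_used_percentage  = 1.0 - 0.40 = 0.60
  //   minimum_desired_capacity = 30 MB / 0.60 = 50 MB
  // i.e. the HWM is kept large enough that at least 40% of it is free.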
1498
1499  if (PrintGCDetails && Verbose) {
1500    gclog_or_tty->print_cr("\nMetaspaceGC::compute_new_size: ");
1501    gclog_or_tty->print_cr("  "
1502                  "  minimum_free_percentage: %6.2f"
1503                  "  maximum_used_percentage: %6.2f",
1504                  minimum_free_percentage,
1505                  maximum_used_percentage);
1506    gclog_or_tty->print_cr("  "
1507                  "   used_after_gc       : %6.1fKB",
1508                  used_after_gc / (double) K);
1509  }
1510
1511
1512  size_t shrink_bytes = 0;
1513  if (capacity_until_GC < minimum_desired_capacity) {
1514    // If the high-water mark (capacity_until_GC) is below the minimum
1515    // desired capacity, raise the HWM.
1516    size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
1517    expand_bytes = align_size_up(expand_bytes, Metaspace::commit_alignment());
1518    // Don't expand unless it's significant
1519    if (expand_bytes >= MinMetaspaceExpansion) {
1520      size_t new_capacity_until_GC = 0;
1521      bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
1522      assert(succeeded, "Should always successfully increment HWM when at safepoint");
1523
1524      Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
1525                                               new_capacity_until_GC,
1526                                               MetaspaceGCThresholdUpdater::ComputeNewSize);
1527      if (PrintGCDetails && Verbose) {
1528        gclog_or_tty->print_cr("    expanding:"
1529                      "  minimum_desired_capacity: %6.1fKB"
1530                      "  expand_bytes: %6.1fKB"
1531                      "  MinMetaspaceExpansion: %6.1fKB"
1532                      "  new metaspace HWM:  %6.1fKB",
1533                      minimum_desired_capacity / (double) K,
1534                      expand_bytes / (double) K,
1535                      MinMetaspaceExpansion / (double) K,
1536                      new_capacity_until_GC / (double) K);
1537      }
1538    }
1539    return;
1540  }
1541
1542  // No expansion, now see if we want to shrink
1543  // We would never want to shrink more than this
1544  assert(capacity_until_GC >= minimum_desired_capacity,
1545         err_msg(SIZE_FORMAT " >= " SIZE_FORMAT,
1546                 capacity_until_GC, minimum_desired_capacity));
1547  size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
1548
1549  // Should shrinking be considered?
1550  if (MaxMetaspaceFreeRatio < 100) {
1551    const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
1552    const double minimum_used_percentage = 1.0 - maximum_free_percentage;
1553    const double max_tmp = used_after_gc / minimum_used_percentage;
1554    size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
1555    maximum_desired_capacity = MAX2(maximum_desired_capacity,
1556                                    MetaspaceSize);
1557    if (PrintGCDetails && Verbose) {
1558      gclog_or_tty->print_cr("  "
1559                             "  maximum_free_percentage: %6.2f"
1560                             "  minimum_used_percentage: %6.2f",
1561                             maximum_free_percentage,
1562                             minimum_used_percentage);
1563      gclog_or_tty->print_cr("  "
1564                             "  minimum_desired_capacity: %6.1fKB"
1565                             "  maximum_desired_capacity: %6.1fKB",
1566                             minimum_desired_capacity / (double) K,
1567                             maximum_desired_capacity / (double) K);
1568    }
1569
1570    assert(minimum_desired_capacity <= maximum_desired_capacity,
1571           "sanity check");
1572
1573    if (capacity_until_GC > maximum_desired_capacity) {
1574      // Capacity too large, compute shrinking size
1575      shrink_bytes = capacity_until_GC - maximum_desired_capacity;
1576      // We don't want to shrink all the way back to initSize if people call
1577      // System.gc(), because some programs do that between "phases" and then
1578      // we'd just have to grow the heap up again for the next phase.  So we
1579      // damp the shrinking: 0% on the first call, 10% on the second call, 40%
1580      // on the third call, and 100% by the fourth call.  But if we recompute
1581      // size without shrinking, it goes back to 0%.
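      // Editor's note: illustrative progression of the damping above, not
      // part of the original source, assuming shrinking keeps being requested
      // on consecutive calls:
      //   call 1: current_shrink_factor =   0 -> shrink   0%, _shrink_factor becomes 10
      //   call 2: current_shrink_factor =  10 -> shrink  10%, _shrink_factor becomes 40
      //   call 3: current_shrink_factor =  40 -> shrink  40%, _shrink_factor becomes 100
      //   call 4: current_shrink_factor = 100 -> shrink 100% (bounded by max_shrink_bytes)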
1582      shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
1583
1584      shrink_bytes = align_size_down(shrink_bytes, Metaspace::commit_alignment());
1585
1586      assert(shrink_bytes <= max_shrink_bytes,
1587        err_msg("invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
1588          shrink_bytes, max_shrink_bytes));
1589      if (current_shrink_factor == 0) {
1590        _shrink_factor = 10;
1591      } else {
1592        _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
1593      }
1594      if (PrintGCDetails && Verbose) {
1595        gclog_or_tty->print_cr("  "
1596                      "  shrinking:"
1597                      "  initSize: %.1fK"
1598                      "  maximum_desired_capacity: %.1fK",
1599                      MetaspaceSize / (double) K,
1600                      maximum_desired_capacity / (double) K);
1601        gclog_or_tty->print_cr("  "
1602                      "  shrink_bytes: %.1fK"
1603                      "  current_shrink_factor: %d"
1604                      "  new shrink factor: %d"
1605                      "  MinMetaspaceExpansion: %.1fK",
1606                      shrink_bytes / (double) K,
1607                      current_shrink_factor,
1608                      _shrink_factor,
1609                      MinMetaspaceExpansion / (double) K);
1610      }
1611    }
1612  }
1613
1614  // Don't shrink unless it's significant
1615  if (shrink_bytes >= MinMetaspaceExpansion &&
1616      ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
1617    size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
1618    Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
1619                                             new_capacity_until_GC,
1620                                             MetaspaceGCThresholdUpdater::ComputeNewSize);
1621  }
1622}
1623
1624// Metadebug methods
1625
1626void Metadebug::init_allocation_fail_alot_count() {
1627  if (MetadataAllocationFailALot) {
1628    _allocation_fail_alot_count =
1629      1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0));
1630  }
1631}
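// Editor's note: illustrative numbers, not part of the original source.
// With MetadataAllocationFailALotInterval = 1000, the expression above picks
// a random count in roughly [1, 1000]; in debug builds test_metadata_failure()
// then forces one artificial allocation failure per such interval.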
1632
1633#ifdef ASSERT
1634bool Metadebug::test_metadata_failure() {
1635  if (MetadataAllocationFailALot &&
1636      Threads::is_vm_complete()) {
1637    if (_allocation_fail_alot_count > 0) {
1638      _allocation_fail_alot_count--;
1639    } else {
1640      if (TraceMetadataChunkAllocation && Verbose) {
1641        gclog_or_tty->print_cr("Metadata allocation failing for "
1642                               "MetadataAllocationFailALot");
1643      }
1644      init_allocation_fail_alot_count();
1645      return true;
1646    }
1647  }
1648  return false;
1649}
1650#endif
1651
1652// ChunkManager methods
1653
1654size_t ChunkManager::free_chunks_total_words() {
1655  return _free_chunks_total;
1656}
1657
1658size_t ChunkManager::free_chunks_total_bytes() {
1659  return free_chunks_total_words() * BytesPerWord;
1660}
1661
1662size_t ChunkManager::free_chunks_count() {
1663#ifdef ASSERT
1664  if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
1665    MutexLockerEx cl(SpaceManager::expand_lock(),
1666                     Mutex::_no_safepoint_check_flag);
1667    // This lock is only needed in debug because the verification
1668    // of the _free_chunks_totals walks the list of free chunks
1669    slow_locked_verify_free_chunks_count();
1670  }
1671#endif
1672  return _free_chunks_count;
1673}
1674
1675void ChunkManager::locked_verify_free_chunks_total() {
1676  assert_lock_strong(SpaceManager::expand_lock());
1677  assert(sum_free_chunks() == _free_chunks_total,
1678    err_msg("_free_chunks_total " SIZE_FORMAT " is not the"
1679           " same as sum " SIZE_FORMAT, _free_chunks_total,
1680           sum_free_chunks()));
1681}
1682
1683void ChunkManager::verify_free_chunks_total() {
1684  MutexLockerEx cl(SpaceManager::expand_lock(),
1685                     Mutex::_no_safepoint_check_flag);
1686  locked_verify_free_chunks_total();
1687}
1688
1689void ChunkManager::locked_verify_free_chunks_count() {
1690  assert_lock_strong(SpaceManager::expand_lock());
1691  assert(sum_free_chunks_count() == _free_chunks_count,
1692    err_msg("_free_chunks_count " SIZE_FORMAT " is not the"
1693           " same as sum " SIZE_FORMAT, _free_chunks_count,
1694           sum_free_chunks_count()));
1695}
1696
1697void ChunkManager::verify_free_chunks_count() {
1698#ifdef ASSERT
1699  MutexLockerEx cl(SpaceManager::expand_lock(),
1700                     Mutex::_no_safepoint_check_flag);
1701  locked_verify_free_chunks_count();
1702#endif
1703}
1704
1705void ChunkManager::verify() {
1706  MutexLockerEx cl(SpaceManager::expand_lock(),
1707                     Mutex::_no_safepoint_check_flag);
1708  locked_verify();
1709}
1710
1711void ChunkManager::locked_verify() {
1712  locked_verify_free_chunks_count();
1713  locked_verify_free_chunks_total();
1714}
1715
1716void ChunkManager::locked_print_free_chunks(outputStream* st) {
1717  assert_lock_strong(SpaceManager::expand_lock());
1718  st->print_cr("Free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
1719                _free_chunks_total, _free_chunks_count);
1720}
1721
1722void ChunkManager::locked_print_sum_free_chunks(outputStream* st) {
1723  assert_lock_strong(SpaceManager::expand_lock());
1724  st->print_cr("Sum free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
1725                sum_free_chunks(), sum_free_chunks_count());
1726}
1727ChunkList* ChunkManager::free_chunks(ChunkIndex index) {
1728  return &_free_chunks[index];
1729}
1730
1731// These methods that sum the free chunk lists are used in printing
1732// methods that are used in product builds.
1733size_t ChunkManager::sum_free_chunks() {
1734  assert_lock_strong(SpaceManager::expand_lock());
1735  size_t result = 0;
1736  for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1737    ChunkList* list = free_chunks(i);
1738
1739    if (list == NULL) {
1740      continue;
1741    }
1742
1743    result = result + list->count() * list->size();
1744  }
1745  result = result + humongous_dictionary()->total_size();
1746  return result;
1747}
1748
1749size_t ChunkManager::sum_free_chunks_count() {
1750  assert_lock_strong(SpaceManager::expand_lock());
1751  size_t count = 0;
1752  for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1753    ChunkList* list = free_chunks(i);
1754    if (list == NULL) {
1755      continue;
1756    }
1757    count = count + list->count();
1758  }
1759  count = count + humongous_dictionary()->total_free_blocks();
1760  return count;
1761}
1762
1763ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
1764  ChunkIndex index = list_index(word_size);
1765  assert(index < HumongousIndex, "No humongous list");
1766  return free_chunks(index);
1767}
1768
1769Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
1770  assert_lock_strong(SpaceManager::expand_lock());
1771
1772  slow_locked_verify();
1773
1774  Metachunk* chunk = NULL;
1775  if (list_index(word_size) != HumongousIndex) {
1776    ChunkList* free_list = find_free_chunks_list(word_size);
1777    assert(free_list != NULL, "Sanity check");
1778
1779    chunk = free_list->head();
1780
1781    if (chunk == NULL) {
1782      return NULL;
1783    }
1784
1785    // Remove the chunk as the head of the list.
1786    free_list->remove_chunk(chunk);
1787
1788    if (TraceMetadataChunkAllocation && Verbose) {
1789      gclog_or_tty->print_cr("ChunkManager::free_chunks_get: free_list "
1790                             PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
1791                             p2i(free_list), p2i(chunk), chunk->word_size());
1792    }
1793  } else {
1794    chunk = humongous_dictionary()->get_chunk(
1795      word_size,
1796      FreeBlockDictionary<Metachunk>::atLeast);
1797
1798    if (chunk == NULL) {
1799      return NULL;
1800    }
1801
1802    if (TraceMetadataHumongousAllocation) {
1803      size_t waste = chunk->word_size() - word_size;
1804      gclog_or_tty->print_cr("Free list allocate humongous chunk size "
1805                             SIZE_FORMAT " for requested size " SIZE_FORMAT
1806                             " waste " SIZE_FORMAT,
1807                             chunk->word_size(), word_size, waste);
1808    }
1809  }
1810
1811  // Chunk is being removed from the chunks free list.
1812  dec_free_chunks_total(chunk->word_size());
1813
1814  // Remove it from the links to this freelist
1815  chunk->set_next(NULL);
1816  chunk->set_prev(NULL);
1817#ifdef ASSERT
1818  // Chunk is no longer on any freelist. Setting it to false makes
1819  // container_count_slow() work.
1820  chunk->set_is_tagged_free(false);
1821#endif
1822  chunk->container()->inc_container_count();
1823
1824  slow_locked_verify();
1825  return chunk;
1826}
1827
1828Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
1829  assert_lock_strong(SpaceManager::expand_lock());
1830  slow_locked_verify();
1831
1832  // Take from the beginning of the list
1833  Metachunk* chunk = free_chunks_get(word_size);
1834  if (chunk == NULL) {
1835    return NULL;
1836  }
1837
1838  assert((word_size <= chunk->word_size()) ||
1839         (list_index(chunk->word_size()) == HumongousIndex),
1840         "Non-humongous variable sized chunk");
1841  if (TraceMetadataChunkAllocation) {
1842    size_t list_count;
1843    if (list_index(word_size) < HumongousIndex) {
1844      ChunkList* list = find_free_chunks_list(word_size);
1845      list_count = list->count();
1846    } else {
1847      list_count = humongous_dictionary()->total_count();
1848    }
1849    gclog_or_tty->print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk "
1850                        PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",
1851                        p2i(this), p2i(chunk), chunk->word_size(), list_count);
1852    locked_print_free_chunks(gclog_or_tty);
1853  }
1854
1855  return chunk;
1856}
1857
1858void ChunkManager::print_on(outputStream* out) const {
1859  if (PrintFLSStatistics != 0) {
1860    const_cast<ChunkManager *>(this)->humongous_dictionary()->report_statistics();
1861  }
1862}
1863
1864// SpaceManager methods
1865
1866void SpaceManager::get_initial_chunk_sizes(Metaspace::MetaspaceType type,
1867                                           size_t* chunk_word_size,
1868                                           size_t* class_chunk_word_size) {
1869  switch (type) {
1870  case Metaspace::BootMetaspaceType:
1871    *chunk_word_size = Metaspace::first_chunk_word_size();
1872    *class_chunk_word_size = Metaspace::first_class_chunk_word_size();
1873    break;
1874  case Metaspace::ROMetaspaceType:
1875    *chunk_word_size = SharedReadOnlySize / wordSize;
1876    *class_chunk_word_size = ClassSpecializedChunk;
1877    break;
1878  case Metaspace::ReadWriteMetaspaceType:
1879    *chunk_word_size = SharedReadWriteSize / wordSize;
1880    *class_chunk_word_size = ClassSpecializedChunk;
1881    break;
1882  case Metaspace::AnonymousMetaspaceType:
1883  case Metaspace::ReflectionMetaspaceType:
1884    *chunk_word_size = SpecializedChunk;
1885    *class_chunk_word_size = ClassSpecializedChunk;
1886    break;
1887  default:
1888    *chunk_word_size = SmallChunk;
1889    *class_chunk_word_size = ClassSmallChunk;
1890    break;
1891  }
1892  assert(*chunk_word_size != 0 && *class_chunk_word_size != 0,
1893    err_msg("Initial chunks sizes bad: data  " SIZE_FORMAT
1894            " class " SIZE_FORMAT,
1895            *chunk_word_size, *class_chunk_word_size));
1896}
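// Editor's note: a summary of the switch above, not part of the original
// source; the concrete word sizes come from the chunk-size constants defined
// elsewhere in this file.
//   BootMetaspaceType                 -> first_chunk_word_size()        / first_class_chunk_word_size()
//   ROMetaspaceType                   -> SharedReadOnlySize / wordSize  / ClassSpecializedChunk
//   ReadWriteMetaspaceType            -> SharedReadWriteSize / wordSize / ClassSpecializedChunk
//   Anonymous/ReflectionMetaspaceType -> SpecializedChunk               / ClassSpecializedChunk
//   all other types                   -> SmallChunk                     / ClassSmallChunk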
1897
1898size_t SpaceManager::sum_free_in_chunks_in_use() const {
1899  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1900  size_t free = 0;
1901  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1902    Metachunk* chunk = chunks_in_use(i);
1903    while (chunk != NULL) {
1904      free += chunk->free_word_size();
1905      chunk = chunk->next();
1906    }
1907  }
1908  return free;
1909}
1910
1911size_t SpaceManager::sum_waste_in_chunks_in_use() const {
1912  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1913  size_t result = 0;
1914  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1915   result += sum_waste_in_chunks_in_use(i);
1916  }
1917
1918  return result;
1919}
1920
1921size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
1922  size_t result = 0;
1923  Metachunk* chunk = chunks_in_use(index);
1924  // Count the free space in all the chunks but not the
1925  // current chunk from which allocations are still being done.
1926  while (chunk != NULL) {
1927    if (chunk != current_chunk()) {
1928      result += chunk->free_word_size();
1929    }
1930    chunk = chunk->next();
1931  }
1932  return result;
1933}
1934
1935size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
1936  // For CMS use "allocated_chunks_words()" which does not need the
1937  // Metaspace lock.  For the other collectors sum over the
1938  // lists.  Use both methods as a check that "allocated_chunks_words()"
1939  // is correct.  That is, sum_capacity_in_chunks_in_use() is too expensive
1940  // to use in the product and allocated_chunks_words() should be used,
1941  // but allow for checking that allocated_chunks_words() returns the same
1942  // value as sum_capacity_in_chunks_in_use(), which is the definitive
1943  // answer.
1944  if (UseConcMarkSweepGC) {
1945    return allocated_chunks_words();
1946  } else {
1947    MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1948    size_t sum = 0;
1949    for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1950      Metachunk* chunk = chunks_in_use(i);
1951      while (chunk != NULL) {
1952        sum += chunk->word_size();
1953        chunk = chunk->next();
1954      }
1955    }
1956    return sum;
1957  }
1958}
1959
1960size_t SpaceManager::sum_count_in_chunks_in_use() {
1961  size_t count = 0;
1962  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1963    count = count + sum_count_in_chunks_in_use(i);
1964  }
1965
1966  return count;
1967}
1968
1969size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
1970  size_t count = 0;
1971  Metachunk* chunk = chunks_in_use(i);
1972  while (chunk != NULL) {
1973    count++;
1974    chunk = chunk->next();
1975  }
1976  return count;
1977}
1978
1979
1980size_t SpaceManager::sum_used_in_chunks_in_use() const {
1981  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1982  size_t used = 0;
1983  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1984    Metachunk* chunk = chunks_in_use(i);
1985    while (chunk != NULL) {
1986      used += chunk->used_word_size();
1987      chunk = chunk->next();
1988    }
1989  }
1990  return used;
1991}
1992
1993void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
1994
1995  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1996    Metachunk* chunk = chunks_in_use(i);
1997    st->print("SpaceManager: %s " PTR_FORMAT,
1998                 chunk_size_name(i), p2i(chunk));
1999    if (chunk != NULL) {
2000      st->print_cr(" free " SIZE_FORMAT,
2001                   chunk->free_word_size());
2002    } else {
2003      st->cr();
2004    }
2005  }
2006
2007  chunk_manager()->locked_print_free_chunks(st);
2008  chunk_manager()->locked_print_sum_free_chunks(st);
2009}
2010
2011size_t SpaceManager::calc_chunk_size(size_t word_size) {
2012
2013  // Decide between a small chunk and a medium chunk.  Up to
2014  // _small_chunk_limit small chunks can be allocated but
2015  // once a medium chunk has been allocated, no more small
2016  // chunks will be allocated.
2017  size_t chunk_word_size;
2018  if (chunks_in_use(MediumIndex) == NULL &&
2019      sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) {
2020    chunk_word_size = (size_t) small_chunk_size();
2021    if (word_size + Metachunk::overhead() > small_chunk_size()) {
2022      chunk_word_size = medium_chunk_size();
2023    }
2024  } else {
2025    chunk_word_size = medium_chunk_size();
2026  }
2027
2028  // Might still need a humongous chunk.  Enforce
2029  // humongous allocation sizes to be aligned up to
2030  // the smallest chunk size.
2031  size_t if_humongous_sized_chunk =
2032    align_size_up(word_size + Metachunk::overhead(),
2033                  smallest_chunk_size());
2034  chunk_word_size =
2035    MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
2036
2037  assert(!SpaceManager::is_humongous(word_size) ||
2038         chunk_word_size == if_humongous_sized_chunk,
2039         err_msg("Size calculation is wrong, word_size " SIZE_FORMAT
2040                 " chunk_word_size " SIZE_FORMAT,
2041                 word_size, chunk_word_size));
2042  if (TraceMetadataHumongousAllocation &&
2043      SpaceManager::is_humongous(word_size)) {
2044    gclog_or_tty->print_cr("Metadata humongous allocation:");
2045    gclog_or_tty->print_cr("  word_size " SIZE_FORMAT, word_size);
2046    gclog_or_tty->print_cr("  chunk_word_size " SIZE_FORMAT,
2047                           chunk_word_size);
2048    gclog_or_tty->print_cr("    chunk overhead " SIZE_FORMAT,
2049                           Metachunk::overhead());
2050  }
2051  return chunk_word_size;
2052}
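// Editor's note: an illustrative walk-through of calc_chunk_size(), not part
// of the original source, assuming the non-class chunk sizes used elsewhere
// in this file (SmallChunk = 512 words, MediumChunk = 8 K words):
//   - while no medium chunk exists and fewer than _small_chunk_limit small
//     chunks have been handed out, a request that fits in
//     512 - Metachunk::overhead() words gets a small chunk;
//   - otherwise the request is served from a medium (8 K word) chunk;
//   - a request too large even for that is rounded up to a humongous chunk
//     whose size is aligned to smallest_chunk_size().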
2053
2054void SpaceManager::track_metaspace_memory_usage() {
2055  if (is_init_completed()) {
2056    if (is_class()) {
2057      MemoryService::track_compressed_class_memory_usage();
2058    }
2059    MemoryService::track_metaspace_memory_usage();
2060  }
2061}
2062
2063MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
2064  assert(vs_list()->current_virtual_space() != NULL,
2065         "Should have been set");
2066  assert(current_chunk() == NULL ||
2067         current_chunk()->allocate(word_size) == NULL,
2068         "Don't need to expand");
2069  MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
2070
2071  if (TraceMetadataChunkAllocation && Verbose) {
2072    size_t words_left = 0;
2073    size_t words_used = 0;
2074    if (current_chunk() != NULL) {
2075      words_left = current_chunk()->free_word_size();
2076      words_used = current_chunk()->used_word_size();
2077    }
2078    gclog_or_tty->print_cr("SpaceManager::grow_and_allocate for " SIZE_FORMAT
2079                           " words " SIZE_FORMAT " words used " SIZE_FORMAT
2080                           " words left",
2081                            word_size, words_used, words_left);
2082  }
2083
2084  // Get another chunk out of the virtual space
2085  size_t grow_chunks_by_words = calc_chunk_size(word_size);
2086  Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words);
2087
2088  MetaWord* mem = NULL;
2089
2090  // If a chunk was available, add it to the in-use chunk list
2091  // and do an allocation from it.
2092  if (next != NULL) {
2093    // Add to this manager's list of chunks in use.
2094    add_chunk(next, false);
2095    mem = next->allocate(word_size);
2096  }
2097
2098  // Track metaspace memory usage statistic.
2099  track_metaspace_memory_usage();
2100
2101  return mem;
2102}
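// Editor's note: a condensed view of the growth path above, not part of the
// original source:
//   1. calc_chunk_size(word_size) picks the size of the next chunk;
//   2. get_new_chunk() first tries the global ChunkManager free lists and
//      falls back to committing fresh memory from the VirtualSpaceList;
//   3. if a chunk is obtained, add_chunk() links it into this SpaceManager
//      and the requested words are allocated from it.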
2103
2104void SpaceManager::print_on(outputStream* st) const {
2105
2106  for (ChunkIndex i = ZeroIndex;
2107       i < NumberOfInUseLists ;
2108       i = next_chunk_index(i) ) {
2109    st->print_cr("  chunks_in_use " PTR_FORMAT " chunk size " SIZE_FORMAT,
2110                 p2i(chunks_in_use(i)),
2111                 chunks_in_use(i) == NULL ? 0 : chunks_in_use(i)->word_size());
2112  }
2113  st->print_cr("    waste:  Small " SIZE_FORMAT " Medium " SIZE_FORMAT
2114               " Humongous " SIZE_FORMAT,
2115               sum_waste_in_chunks_in_use(SmallIndex),
2116               sum_waste_in_chunks_in_use(MediumIndex),
2117               sum_waste_in_chunks_in_use(HumongousIndex));
2118  // block free lists
2119  if (block_freelists() != NULL) {
2120    st->print_cr("total in block free lists " SIZE_FORMAT,
2121      block_freelists()->total_size());
2122  }
2123}
2124
2125SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
2126                           Mutex* lock) :
2127  _mdtype(mdtype),
2128  _allocated_blocks_words(0),
2129  _allocated_chunks_words(0),
2130  _allocated_chunks_count(0),
2131  _lock(lock)
2132{
2133  initialize();
2134}
2135
2136void SpaceManager::inc_size_metrics(size_t words) {
2137  assert_lock_strong(SpaceManager::expand_lock());
2138  // Total words in allocated Metachunks and count of allocated Metachunks
2139  // for each SpaceManager
2140  _allocated_chunks_words = _allocated_chunks_words + words;
2141  _allocated_chunks_count++;
2142  // Global total of capacity in allocated Metachunks
2143  MetaspaceAux::inc_capacity(mdtype(), words);
2144  // Global total of allocated Metablocks.
2145  // used_words_slow() includes the overhead in each
2146  // Metachunk so include it in the used when the
2147  // Metachunk is first added (so only added once per
2148  // Metachunk).
2149  MetaspaceAux::inc_used(mdtype(), Metachunk::overhead());
2150}
2151
2152void SpaceManager::inc_used_metrics(size_t words) {
2153  // Add to the per SpaceManager total
2154  Atomic::add_ptr(words, &_allocated_blocks_words);
2155  // Add to the global total
2156  MetaspaceAux::inc_used(mdtype(), words);
2157}
2158
2159void SpaceManager::dec_total_from_size_metrics() {
2160  MetaspaceAux::dec_capacity(mdtype(), allocated_chunks_words());
2161  MetaspaceAux::dec_used(mdtype(), allocated_blocks_words());
2162  // Also deduct the overhead per Metachunk
2163  MetaspaceAux::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead());
2164}
2165
2166void SpaceManager::initialize() {
2167  Metadebug::init_allocation_fail_alot_count();
2168  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2169    _chunks_in_use[i] = NULL;
2170  }
2171  _current_chunk = NULL;
2172  if (TraceMetadataChunkAllocation && Verbose) {
2173    gclog_or_tty->print_cr("SpaceManager(): " PTR_FORMAT, p2i(this));
2174  }
2175}
2176
2177void ChunkManager::return_chunks(ChunkIndex index, Metachunk* chunks) {
2178  if (chunks == NULL) {
2179    return;
2180  }
2181  ChunkList* list = free_chunks(index);
2182  assert(list->size() == chunks->word_size(), "Mismatch in chunk sizes");
2183  assert_lock_strong(SpaceManager::expand_lock());
2184  Metachunk* cur = chunks;
2185
2186  // This returns chunks one at a time.  If a new
2187  // class List can be created that is a base class
2188  // of FreeList then something like FreeList::prepend()
2189  // can be used in place of this loop
2190  while (cur != NULL) {
2191    assert(cur->container() != NULL, "Container should have been set");
2192    cur->container()->dec_container_count();
2193    // Capture the next link before it is changed
2194    // by the call to return_chunk_at_head();
2195    Metachunk* next = cur->next();
2196    DEBUG_ONLY(cur->set_is_tagged_free(true);)
2197    list->return_chunk_at_head(cur);
2198    cur = next;
2199  }
2200}
2201
2202SpaceManager::~SpaceManager() {
2203  // The assert below takes this->_lock, which can't be done while holding expand_lock()
2204  assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
2205    err_msg("sum_capacity_in_chunks_in_use() " SIZE_FORMAT
2206            " allocated_chunks_words() " SIZE_FORMAT,
2207            sum_capacity_in_chunks_in_use(), allocated_chunks_words()));
2208
2209  MutexLockerEx fcl(SpaceManager::expand_lock(),
2210                    Mutex::_no_safepoint_check_flag);
2211
2212  chunk_manager()->slow_locked_verify();
2213
2214  dec_total_from_size_metrics();
2215
2216  if (TraceMetadataChunkAllocation && Verbose) {
2217    gclog_or_tty->print_cr("~SpaceManager(): " PTR_FORMAT, p2i(this));
2218    locked_print_chunks_in_use_on(gclog_or_tty);
2219  }
2220
2221  // Do not mangle freed Metachunks.  The chunk size inside Metachunks
2222  // is still used during the freeing of VirtualSpaceNodes.
2223
2224  // Have to update before the chunks_in_use lists are emptied
2225  // below.
2226  chunk_manager()->inc_free_chunks_total(allocated_chunks_words(),
2227                                         sum_count_in_chunks_in_use());
2228
2229  // Add all the chunks in use by this space manager
2230  // to the global list of free chunks.
2231
2232  // Follow each list of chunks-in-use and add them to the
2233  // free lists.  Each list is NULL terminated.
2234
2235  for (ChunkIndex i = ZeroIndex; i < HumongousIndex; i = next_chunk_index(i)) {
2236    if (TraceMetadataChunkAllocation && Verbose) {
2237      gclog_or_tty->print_cr("returned " SIZE_FORMAT " %s chunks to freelist",
2238                             sum_count_in_chunks_in_use(i),
2239                             chunk_size_name(i));
2240    }
2241    Metachunk* chunks = chunks_in_use(i);
2242    chunk_manager()->return_chunks(i, chunks);
2243    set_chunks_in_use(i, NULL);
2244    if (TraceMetadataChunkAllocation && Verbose) {
2245      gclog_or_tty->print_cr("updated freelist count " SSIZE_FORMAT " %s",
2246                             chunk_manager()->free_chunks(i)->count(),
2247                             chunk_size_name(i));
2248    }
2249    assert(i != HumongousIndex, "Humongous chunks are handled explicitly later");
2250  }
2251
2252  // The medium chunk case may be optimized by passing the head and
2253  // tail of the medium chunk list to add_at_head().  The tail is often
2254  // the current chunk but there are probably exceptions.
2255
2256  // Humongous chunks
2257  if (TraceMetadataChunkAllocation && Verbose) {
2258    gclog_or_tty->print_cr("returned " SIZE_FORMAT " %s humongous chunks to dictionary",
2259                            sum_count_in_chunks_in_use(HumongousIndex),
2260                            chunk_size_name(HumongousIndex));
2261    gclog_or_tty->print("Humongous chunk dictionary: ");
2262  }
2263  // Humongous chunks are never the current chunk.
2264  Metachunk* humongous_chunks = chunks_in_use(HumongousIndex);
2265
2266  while (humongous_chunks != NULL) {
2267#ifdef ASSERT
2268    humongous_chunks->set_is_tagged_free(true);
2269#endif
2270    if (TraceMetadataChunkAllocation && Verbose) {
2271      gclog_or_tty->print(PTR_FORMAT " (" SIZE_FORMAT ") ",
2272                          p2i(humongous_chunks),
2273                          humongous_chunks->word_size());
2274    }
2275    assert(humongous_chunks->word_size() == (size_t)
2276           align_size_up(humongous_chunks->word_size(),
2277                             smallest_chunk_size()),
2278           err_msg("Humongous chunk size is wrong: word size " SIZE_FORMAT
2279                   " granularity " SIZE_FORMAT,
2280                   humongous_chunks->word_size(), smallest_chunk_size()));
2281    Metachunk* next_humongous_chunks = humongous_chunks->next();
2282    humongous_chunks->container()->dec_container_count();
2283    chunk_manager()->humongous_dictionary()->return_chunk(humongous_chunks);
2284    humongous_chunks = next_humongous_chunks;
2285  }
2286  if (TraceMetadataChunkAllocation && Verbose) {
2287    gclog_or_tty->cr();
2288    gclog_or_tty->print_cr("updated dictionary count " SIZE_FORMAT " %s",
2289                     chunk_manager()->humongous_dictionary()->total_count(),
2290                     chunk_size_name(HumongousIndex));
2291  }
2292  chunk_manager()->slow_locked_verify();
2293}
2294
2295const char* SpaceManager::chunk_size_name(ChunkIndex index) const {
2296  switch (index) {
2297    case SpecializedIndex:
2298      return "Specialized";
2299    case SmallIndex:
2300      return "Small";
2301    case MediumIndex:
2302      return "Medium";
2303    case HumongousIndex:
2304      return "Humongous";
2305    default:
2306      return NULL;
2307  }
2308}
2309
2310ChunkIndex ChunkManager::list_index(size_t size) {
2311  switch (size) {
2312    case SpecializedChunk:
2313      assert(SpecializedChunk == ClassSpecializedChunk,
2314             "Need branch for ClassSpecializedChunk");
2315      return SpecializedIndex;
2316    case SmallChunk:
2317    case ClassSmallChunk:
2318      return SmallIndex;
2319    case MediumChunk:
2320    case ClassMediumChunk:
2321      return MediumIndex;
2322    default:
2323      assert(size > MediumChunk || size > ClassMediumChunk,
2324             "Not a humongous chunk");
2325      return HumongousIndex;
2326  }
2327}
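// Editor's note: the mapping implemented above, shown as a table; not part
// of the original source.
//   SpecializedChunk (== ClassSpecializedChunk) -> SpecializedIndex
//   SmallChunk  or ClassSmallChunk              -> SmallIndex
//   MediumChunk or ClassMediumChunk             -> MediumIndex
//   any other (larger) size                     -> HumongousIndex (dictionary-backed)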
2328
2329void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
2330  assert_lock_strong(_lock);
2331  size_t raw_word_size = get_raw_word_size(word_size);
2332  size_t min_size = TreeChunk<Metablock, FreeList<Metablock> >::min_size();
2333  assert(raw_word_size >= min_size,
2334         err_msg("Should not deallocate dark matter " SIZE_FORMAT "<" SIZE_FORMAT, word_size, min_size));
2335  block_freelists()->return_block(p, raw_word_size);
2336}
2337
2338// Adds a chunk to the list of chunks in use.
2339void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
2340
2341  assert(new_chunk != NULL, "Should not be NULL");
2342  assert(new_chunk->next() == NULL, "Should not be on a list");
2343
2344  new_chunk->reset_empty();
2345
2346  // Find the correct list and set the current
2347  // chunk for that list.
2348  ChunkIndex index = ChunkManager::list_index(new_chunk->word_size());
2349
2350  if (index != HumongousIndex) {
2351    retire_current_chunk();
2352    set_current_chunk(new_chunk);
2353    new_chunk->set_next(chunks_in_use(index));
2354    set_chunks_in_use(index, new_chunk);
2355  } else {
2356    // For null class loader data and DumpSharedSpaces, the first chunk isn't
2357    // small, so small will be null.  Link this first chunk as the current
2358    // chunk.
2359    if (make_current) {
2360      // Set as the current chunk but otherwise treat as a humongous chunk.
2361      set_current_chunk(new_chunk);
2362    }
2363    // Link at head.  The _current_chunk only points to a humongous chunk for
2364    // the null class loader metaspace (class and data virtual space managers),
2365    // and since new humongous chunks are linked at the head, it will not point
2366    // to the tail of the humongous chunks list.
2367    new_chunk->set_next(chunks_in_use(HumongousIndex));
2368    set_chunks_in_use(HumongousIndex, new_chunk);
2369
2370    assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
2371  }
2372
2373  // Add to the running sum of capacity
2374  inc_size_metrics(new_chunk->word_size());
2375
2376  assert(new_chunk->is_empty(), "Not ready for reuse");
2377  if (TraceMetadataChunkAllocation && Verbose) {
2378    gclog_or_tty->print("SpaceManager::add_chunk: " SIZE_FORMAT " ",
2379                        sum_count_in_chunks_in_use());
2380    new_chunk->print_on(gclog_or_tty);
2381    chunk_manager()->locked_print_free_chunks(gclog_or_tty);
2382  }
2383}
2384
2385void SpaceManager::retire_current_chunk() {
2386  if (current_chunk() != NULL) {
2387    size_t remaining_words = current_chunk()->free_word_size();
2388    if (remaining_words >= TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
2389      block_freelists()->return_block(current_chunk()->allocate(remaining_words), remaining_words);
2390      inc_used_metrics(remaining_words);
2391    }
2392  }
2393}
2394
2395Metachunk* SpaceManager::get_new_chunk(size_t word_size,
2396                                       size_t grow_chunks_by_words) {
2397  // Get a chunk from the chunk freelist
2398  Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);
2399
2400  if (next == NULL) {
2401    next = vs_list()->get_new_chunk(word_size,
2402                                    grow_chunks_by_words,
2403                                    medium_chunk_bunch());
2404  }
2405
2406  if (TraceMetadataHumongousAllocation && next != NULL &&
2407      SpaceManager::is_humongous(next->word_size())) {
2408    gclog_or_tty->print_cr("  new humongous chunk word size "
2409                           SIZE_FORMAT, next->word_size());
2410  }
2411
2412  return next;
2413}
2414
2415MetaWord* SpaceManager::allocate(size_t word_size) {
2416  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2417
2418  size_t raw_word_size = get_raw_word_size(word_size);
2419  BlockFreelist* fl =  block_freelists();
2420  MetaWord* p = NULL;
2421  // Allocation from the dictionary is expensive in the sense that
2422  // the dictionary has to be searched for a size.  Don't allocate
2423  // from the dictionary until it starts to get fat.  Is this
2424  // a reasonable policy?  Maybe a skinny dictionary is fast enough
2425  // for allocations.  Do some profiling.  JJJ
2426  if (fl->total_size() > allocation_from_dictionary_limit) {
2427    p = fl->get_block(raw_word_size);
2428  }
2429  if (p == NULL) {
2430    p = allocate_work(raw_word_size);
2431  }
2432
2433  return p;
2434}
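// Editor's note: a condensed view of SpaceManager::allocate(), not part of
// the original source:
//   1. round word_size up to the raw block size (get_raw_word_size());
//   2. once the deallocated-block freelist has grown beyond
//      allocation_from_dictionary_limit, try to reuse a freed block first;
//   3. otherwise allocate_work() carves the block out of the current chunk,
//      growing the metaspace via grow_and_allocate() if necessary.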
2435
2436// Returns the address of space allocated for "word_size".
2437// This method does not know about blocks (Metablocks).
2438MetaWord* SpaceManager::allocate_work(size_t word_size) {
2439  assert_lock_strong(_lock);
2440#ifdef ASSERT
2441  if (Metadebug::test_metadata_failure()) {
2442    return NULL;
2443  }
2444#endif
2445  // Is there space in the current chunk?
2446  MetaWord* result = NULL;
2447
2448  // For DumpSharedSpaces, only allocate out of the current chunk which is
2449  // never null because we gave it the size we wanted.   Caller reports out
2450  // of memory if this returns null.
2451  if (DumpSharedSpaces) {
2452    assert(current_chunk() != NULL, "should never happen");
2453    inc_used_metrics(word_size);
2454    return current_chunk()->allocate(word_size); // caller handles null result
2455  }
2456
2457  if (current_chunk() != NULL) {
2458    result = current_chunk()->allocate(word_size);
2459  }
2460
2461  if (result == NULL) {
2462    result = grow_and_allocate(word_size);
2463  }
2464
2465  if (result != NULL) {
2466    inc_used_metrics(word_size);
2467    assert(result != (MetaWord*) chunks_in_use(MediumIndex),
2468           "Head of the list is being allocated");
2469  }
2470
2471  return result;
2472}
2473
2474void SpaceManager::verify() {
2475  // If there are blocks in the dictionary, then
2476  // verification of chunks does not work since
2477  // being in the dictionary alters a chunk.
2478  if (block_freelists()->total_size() == 0) {
2479    for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2480      Metachunk* curr = chunks_in_use(i);
2481      while (curr != NULL) {
2482        curr->verify();
2483        verify_chunk_size(curr);
2484        curr = curr->next();
2485      }
2486    }
2487  }
2488}
2489
2490void SpaceManager::verify_chunk_size(Metachunk* chunk) {
2491  assert(is_humongous(chunk->word_size()) ||
2492         chunk->word_size() == medium_chunk_size() ||
2493         chunk->word_size() == small_chunk_size() ||
2494         chunk->word_size() == specialized_chunk_size(),
2495         "Chunk size is wrong");
2496  return;
2497}
2498
2499#ifdef ASSERT
2500void SpaceManager::verify_allocated_blocks_words() {
2501  // Verification is only guaranteed at a safepoint.
2502  assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(),
2503    "Verification can fail if the applications is running");
2504  assert(allocated_blocks_words() == sum_used_in_chunks_in_use(),
2505    err_msg("allocation total is not consistent " SIZE_FORMAT
2506            " vs " SIZE_FORMAT,
2507            allocated_blocks_words(), sum_used_in_chunks_in_use()));
2508}
2509
2510#endif
2511
2512void SpaceManager::dump(outputStream* const out) const {
2513  size_t curr_total = 0;
2514  size_t waste = 0;
2515  uint i = 0;
2516  size_t used = 0;
2517  size_t capacity = 0;
2518
2519  // Add up statistics for all chunks in this SpaceManager.
2520  for (ChunkIndex index = ZeroIndex;
2521       index < NumberOfInUseLists;
2522       index = next_chunk_index(index)) {
2523    for (Metachunk* curr = chunks_in_use(index);
2524         curr != NULL;
2525         curr = curr->next()) {
2526      out->print("%d) ", i++);
2527      curr->print_on(out);
2528      curr_total += curr->word_size();
2529      used += curr->used_word_size();
2530      capacity += curr->word_size();
2531      waste += curr->free_word_size() + curr->overhead();
2532    }
2533  }
2534
2535  if (TraceMetadataChunkAllocation && Verbose) {
2536    block_freelists()->print_on(out);
2537  }
2538
2539  size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
2540  // Free space isn't wasted.
2541  waste -= free;
2542
2543  out->print_cr("total of all chunks "  SIZE_FORMAT " used " SIZE_FORMAT
2544                " free " SIZE_FORMAT " capacity " SIZE_FORMAT
2545                " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste);
2546}
2547
2548#ifndef PRODUCT
2549void SpaceManager::mangle_freed_chunks() {
2550  for (ChunkIndex index = ZeroIndex;
2551       index < NumberOfInUseLists;
2552       index = next_chunk_index(index)) {
2553    for (Metachunk* curr = chunks_in_use(index);
2554         curr != NULL;
2555         curr = curr->next()) {
2556      curr->mangle();
2557    }
2558  }
2559}
2560#endif // PRODUCT
2561
2562// MetaspaceAux
2563
2564
2565size_t MetaspaceAux::_capacity_words[] = {0, 0};
2566size_t MetaspaceAux::_used_words[] = {0, 0};
2567
2568size_t MetaspaceAux::free_bytes(Metaspace::MetadataType mdtype) {
2569  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2570  return list == NULL ? 0 : list->free_bytes();
2571}
2572
2573size_t MetaspaceAux::free_bytes() {
2574  return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType);
2575}
2576
2577void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
2578  assert_lock_strong(SpaceManager::expand_lock());
2579  assert(words <= capacity_words(mdtype),
2580    err_msg("About to decrement below 0: words " SIZE_FORMAT
2581            " is greater than _capacity_words[%u] " SIZE_FORMAT,
2582            words, mdtype, capacity_words(mdtype)));
2583  _capacity_words[mdtype] -= words;
2584}
2585
2586void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
2587  assert_lock_strong(SpaceManager::expand_lock());
2588  // Needs to be atomic
2589  _capacity_words[mdtype] += words;
2590}
2591
2592void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) {
2593  assert(words <= used_words(mdtype),
2594    err_msg("About to decrement below 0: words " SIZE_FORMAT
2595            " is greater than _used_words[%u] " SIZE_FORMAT,
2596            words, mdtype, used_words(mdtype)));
2597  // For CMS, deallocation of the Metaspaces occurs during the
2598  // sweep which is a concurrent phase.  Protection by the expand_lock()
2599  // is not enough since allocation is on a per Metaspace basis
2600  // and protected by the Metaspace lock.
2601  jlong minus_words = (jlong) - (jlong) words;
2602  Atomic::add_ptr(minus_words, &_used_words[mdtype]);
2603}
2604
2605void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) {
2606  // _used_words tracks allocations for
2607  // each piece of metadata.  Those allocations are
2608  // generally done concurrently by different application
2609  // threads so must be done atomically.
2610  Atomic::add_ptr(words, &_used_words[mdtype]);
2611}
2612
2613size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) {
2614  size_t used = 0;
2615  ClassLoaderDataGraphMetaspaceIterator iter;
2616  while (iter.repeat()) {
2617    Metaspace* msp = iter.get_next();
2618    // Sum allocated_blocks_words for each metaspace
2619    if (msp != NULL) {
2620      used += msp->used_words_slow(mdtype);
2621    }
2622  }
2623  return used * BytesPerWord;
2624}
2625
2626size_t MetaspaceAux::free_bytes_slow(Metaspace::MetadataType mdtype) {
2627  size_t free = 0;
2628  ClassLoaderDataGraphMetaspaceIterator iter;
2629  while (iter.repeat()) {
2630    Metaspace* msp = iter.get_next();
2631    if (msp != NULL) {
2632      free += msp->free_words_slow(mdtype);
2633    }
2634  }
2635  return free * BytesPerWord;
2636}
2637
2638size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
2639  if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) {
2640    return 0;
2641  }
2642  // Don't count the space in the freelists.  That space will be
2643  // added to the capacity calculation as needed.
2644  size_t capacity = 0;
2645  ClassLoaderDataGraphMetaspaceIterator iter;
2646  while (iter.repeat()) {
2647    Metaspace* msp = iter.get_next();
2648    if (msp != NULL) {
2649      capacity += msp->capacity_words_slow(mdtype);
2650    }
2651  }
2652  return capacity * BytesPerWord;
2653}
2654
2655size_t MetaspaceAux::capacity_bytes_slow() {
2656#ifdef PRODUCT
2657  // Use capacity_bytes() in PRODUCT instead of this function.
2658  guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
2659#endif
2660  size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
2661  size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
2662  assert(capacity_bytes() == class_capacity + non_class_capacity,
2663      err_msg("bad accounting: capacity_bytes() " SIZE_FORMAT
2664        " class_capacity + non_class_capacity " SIZE_FORMAT
2665        " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
2666        capacity_bytes(), class_capacity + non_class_capacity,
2667        class_capacity, non_class_capacity));
2668
2669  return class_capacity + non_class_capacity;
2670}
2671
2672size_t MetaspaceAux::reserved_bytes(Metaspace::MetadataType mdtype) {
2673  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2674  return list == NULL ? 0 : list->reserved_bytes();
2675}
2676
2677size_t MetaspaceAux::committed_bytes(Metaspace::MetadataType mdtype) {
2678  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2679  return list == NULL ? 0 : list->committed_bytes();
2680}
2681
2682size_t MetaspaceAux::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
2683
2684size_t MetaspaceAux::free_chunks_total_words(Metaspace::MetadataType mdtype) {
2685  ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
2686  if (chunk_manager == NULL) {
2687    return 0;
2688  }
2689  chunk_manager->slow_verify();
2690  return chunk_manager->free_chunks_total_words();
2691}
2692
2693size_t MetaspaceAux::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
2694  return free_chunks_total_words(mdtype) * BytesPerWord;
2695}
2696
2697size_t MetaspaceAux::free_chunks_total_words() {
2698  return free_chunks_total_words(Metaspace::ClassType) +
2699         free_chunks_total_words(Metaspace::NonClassType);
2700}
2701
2702size_t MetaspaceAux::free_chunks_total_bytes() {
2703  return free_chunks_total_words() * BytesPerWord;
2704}
2705
2706bool MetaspaceAux::has_chunk_free_list(Metaspace::MetadataType mdtype) {
2707  return Metaspace::get_chunk_manager(mdtype) != NULL;
2708}
2709
2710MetaspaceChunkFreeListSummary MetaspaceAux::chunk_free_list_summary(Metaspace::MetadataType mdtype) {
2711  if (!has_chunk_free_list(mdtype)) {
2712    return MetaspaceChunkFreeListSummary();
2713  }
2714
2715  const ChunkManager* cm = Metaspace::get_chunk_manager(mdtype);
2716  return cm->chunk_free_list_summary();
2717}
2718
2719void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
2720  gclog_or_tty->print(", [Metaspace:");
2721  if (PrintGCDetails && Verbose) {
2722    gclog_or_tty->print(" "  SIZE_FORMAT
2723                        "->" SIZE_FORMAT
2724                        "("  SIZE_FORMAT ")",
2725                        prev_metadata_used,
2726                        used_bytes(),
2727                        reserved_bytes());
2728  } else {
2729    gclog_or_tty->print(" "  SIZE_FORMAT "K"
2730                        "->" SIZE_FORMAT "K"
2731                        "("  SIZE_FORMAT "K)",
2732                        prev_metadata_used/K,
2733                        used_bytes()/K,
2734                        reserved_bytes()/K);
2735  }
2736
2737  gclog_or_tty->print("]");
2738}
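// Editor's note: an illustrative line produced by the non-verbose branch
// above; the numbers are made up, not taken from a real log:
//   , [Metaspace: 3490K->3512K(1056768K)]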
2739
2740// This is printed when PrintGCDetails
2741void MetaspaceAux::print_on(outputStream* out) {
2742  Metaspace::MetadataType nct = Metaspace::NonClassType;
2743
2744  out->print_cr(" Metaspace       "
2745                "used "      SIZE_FORMAT "K, "
2746                "capacity "  SIZE_FORMAT "K, "
2747                "committed " SIZE_FORMAT "K, "
2748                "reserved "  SIZE_FORMAT "K",
2749                used_bytes()/K,
2750                capacity_bytes()/K,
2751                committed_bytes()/K,
2752                reserved_bytes()/K);
2753
2754  if (Metaspace::using_class_space()) {
2755    Metaspace::MetadataType ct = Metaspace::ClassType;
2756    out->print_cr("  class space    "
2757                  "used "      SIZE_FORMAT "K, "
2758                  "capacity "  SIZE_FORMAT "K, "
2759                  "committed " SIZE_FORMAT "K, "
2760                  "reserved "  SIZE_FORMAT "K",
2761                  used_bytes(ct)/K,
2762                  capacity_bytes(ct)/K,
2763                  committed_bytes(ct)/K,
2764                  reserved_bytes(ct)/K);
2765  }
2766}
2767
2768// Print information for class space and data space separately.
2769// This is almost the same as above.
2770void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
2771  size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype);
2772  size_t capacity_bytes = capacity_bytes_slow(mdtype);
2773  size_t used_bytes = used_bytes_slow(mdtype);
2774  size_t free_bytes = free_bytes_slow(mdtype);
2775  size_t used_and_free = used_bytes + free_bytes +
2776                           free_chunks_capacity_bytes;
2777  out->print_cr("  Chunk accounting: used in chunks " SIZE_FORMAT
2778             "K + unused in chunks " SIZE_FORMAT "K  + "
2779             " capacity in free chunks " SIZE_FORMAT "K = " SIZE_FORMAT
2780             "K  capacity in allocated chunks " SIZE_FORMAT "K",
2781             used_bytes / K,
2782             free_bytes / K,
2783             free_chunks_capacity_bytes / K,
2784             used_and_free / K,
2785             capacity_bytes / K);
2786  // Accounting can only be correct if we got the values during a safepoint
2787  assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong");
2788}
2789
2790// Print total fragmentation for class metaspaces
2791void MetaspaceAux::print_class_waste(outputStream* out) {
2792  assert(Metaspace::using_class_space(), "class metaspace not used");
2793  size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0;
2794  size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0;
2795  ClassLoaderDataGraphMetaspaceIterator iter;
2796  while (iter.repeat()) {
2797    Metaspace* msp = iter.get_next();
2798    if (msp != NULL) {
2799      cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
2800      cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
2801      cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
2802      cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
2803      cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
2804      cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
2805      cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
2806    }
2807  }
2808  out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
2809                SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
2810                SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
2811                "large count " SIZE_FORMAT,
2812                cls_specialized_count, cls_specialized_waste,
2813                cls_small_count, cls_small_waste,
2814                cls_medium_count, cls_medium_waste, cls_humongous_count);
2815}
2816
2817// Print total fragmentation for data and class metaspaces separately
2818void MetaspaceAux::print_waste(outputStream* out) {
2819  size_t specialized_waste = 0, small_waste = 0, medium_waste = 0;
2820  size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0;
2821
2822  ClassLoaderDataGraphMetaspaceIterator iter;
2823  while (iter.repeat()) {
2824    Metaspace* msp = iter.get_next();
2825    if (msp != NULL) {
2826      specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
2827      specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
2828      small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex);
2829      small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex);
2830      medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex);
2831      medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex);
2832      humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex);
2833    }
2834  }
2835  out->print_cr("Total fragmentation waste (words) doesn't count free space");
2836  out->print_cr("  data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
2837                        SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
2838                        SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
2839                        "large count " SIZE_FORMAT,
2840             specialized_count, specialized_waste, small_count,
2841             small_waste, medium_count, medium_waste, humongous_count);
2842  if (Metaspace::using_class_space()) {
2843    print_class_waste(out);
2844  }
2845}
2846
2847// Dump global metaspace things from the end of ClassLoaderDataGraph
2848void MetaspaceAux::dump(outputStream* out) {
2849  out->print_cr("All Metaspace:");
2850  out->print("data space: "); print_on(out, Metaspace::NonClassType);
2851  out->print("class space: "); print_on(out, Metaspace::ClassType);
2852  print_waste(out);
2853}
2854
2855void MetaspaceAux::verify_free_chunks() {
2856  Metaspace::chunk_manager_metadata()->verify();
2857  if (Metaspace::using_class_space()) {
2858    Metaspace::chunk_manager_class()->verify();
2859  }
2860}
2861
2862void MetaspaceAux::verify_capacity() {
2863#ifdef ASSERT
2864  size_t running_sum_capacity_bytes = capacity_bytes();
2865  // For purposes of the running sum of capacity, verify against capacity
2866  size_t capacity_in_use_bytes = capacity_bytes_slow();
2867  assert(running_sum_capacity_bytes == capacity_in_use_bytes,
2868    err_msg("capacity_words() * BytesPerWord " SIZE_FORMAT
2869            " capacity_bytes_slow()" SIZE_FORMAT,
2870            running_sum_capacity_bytes, capacity_in_use_bytes));
2871  for (Metaspace::MetadataType i = Metaspace::ClassType;
2872       i < Metaspace::MetadataTypeCount;
2873       i = (Metaspace::MetadataType)(i + 1)) {
2874    size_t capacity_in_use_bytes = capacity_bytes_slow(i);
2875    assert(capacity_bytes(i) == capacity_in_use_bytes,
2876      err_msg("capacity_bytes(%u) " SIZE_FORMAT
2877              " capacity_bytes_slow(%u)" SIZE_FORMAT,
2878              i, capacity_bytes(i), i, capacity_in_use_bytes));
2879  }
2880#endif
2881}
2882
2883void MetaspaceAux::verify_used() {
2884#ifdef ASSERT
2885  size_t running_sum_used_bytes = used_bytes();
2886  // For purposes of the running sum of used, verify against used
2887  size_t used_in_use_bytes = used_bytes_slow();
2888  assert(running_sum_used_bytes == used_in_use_bytes,
2889    err_msg("used_bytes() " SIZE_FORMAT
2890            " used_bytes_slow()" SIZE_FORMAT,
2891            running_sum_used_bytes, used_in_use_bytes));
2892  for (Metaspace::MetadataType i = Metaspace::ClassType;
2893       i < Metaspace::MetadataTypeCount;
2894       i = (Metaspace::MetadataType)(i + 1)) {
2895    size_t used_in_use_bytes = used_bytes_slow(i);
2896    assert(used_bytes(i) == used_in_use_bytes,
2897      err_msg("used_bytes(%u) " SIZE_FORMAT
2898              " used_bytes_slow(%u)" SIZE_FORMAT,
2899              i, used_bytes(i), i, used_in_use_bytes));
2900  }
2901#endif
2902}
2903
2904void MetaspaceAux::verify_metrics() {
2905  verify_capacity();
2906  verify_used();
2907}
2908
2909
2910// Metaspace methods
2911
2912size_t Metaspace::_first_chunk_word_size = 0;
2913size_t Metaspace::_first_class_chunk_word_size = 0;
2914
2915size_t Metaspace::_commit_alignment = 0;
2916size_t Metaspace::_reserve_alignment = 0;
2917
2918Metaspace::Metaspace(Mutex* lock, MetaspaceType type) {
2919  initialize(lock, type);
2920}
2921
2922Metaspace::~Metaspace() {
2923  delete _vsm;
2924  if (using_class_space()) {
2925    delete _class_vsm;
2926  }
2927}
2928
2929VirtualSpaceList* Metaspace::_space_list = NULL;
2930VirtualSpaceList* Metaspace::_class_space_list = NULL;
2931
2932ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
2933ChunkManager* Metaspace::_chunk_manager_class = NULL;
2934
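// VIRTUALSPACEMULTIPLIER determines how big the initial (non-class) virtual
// space is relative to the boot class loader's first chunk (see global_initialize()).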
2935#define VIRTUALSPACEMULTIPLIER 2
2936
2937#ifdef _LP64
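// The 4G range that a 32-bit narrow klass pointer can address without a shift.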
2938static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
2939
2940void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
2941  // Figure out the narrow_klass_base and the narrow_klass_shift.  The
2942  // narrow_klass_base is the lower of the metaspace base and the cds base
2943  // (if cds is enabled).  The narrow_klass_shift depends on the distance
2944  // between the lower base and higher address.
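  // For example, with 8-byte Klass alignment (LogKlassAlignmentInBytes == 3) an
  // unshifted encoding reaches 4G, while a shifted encoding reaches 32G.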
2945  address lower_base;
2946  address higher_address;
2947#if INCLUDE_CDS
2948  if (UseSharedSpaces) {
2949    higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
2950                          (address)(metaspace_base + compressed_class_space_size()));
2951    lower_base = MIN2(metaspace_base, cds_base);
2952  } else
2953#endif
2954  {
2955    higher_address = metaspace_base + compressed_class_space_size();
2956    lower_base = metaspace_base;
2957
2958    uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;
2959    // If compressed class space fits in lower 32G, we don't need a base.
2960    if (higher_address <= (address)klass_encoding_max) {
2961      lower_base = 0; // Effectively lower base is zero.
2962    }
2963  }
2964
2965  Universe::set_narrow_klass_base(lower_base);
2966
2967  if ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) {
2968    Universe::set_narrow_klass_shift(0);
2969  } else {
2970    assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces");
2971    Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
2972  }
2973}
2974
2975#if INCLUDE_CDS
2976// Return TRUE if the specified metaspace_base and cds_base are close enough
2977// to work with compressed klass pointers.
2978bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
2979  assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
2980  assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
2981  address lower_base = MIN2((address)metaspace_base, cds_base);
2982  address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
2983                                (address)(metaspace_base + compressed_class_space_size()));
2984  return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
2985}
2986#endif
2987
2988// Try to allocate the metaspace at the requested addr.
2989void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
2990  assert(using_class_space(), "called improperly");
2991  assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
2992  assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
2993         "Metaspace size is too big");
2994  assert_is_ptr_aligned(requested_addr, _reserve_alignment);
2995  assert_is_ptr_aligned(cds_base, _reserve_alignment);
2996  assert_is_size_aligned(compressed_class_space_size(), _reserve_alignment);
2997
2998  // Don't use large pages for the class space.
2999  bool large_pages = false;
3000
3001#ifndef AARCH64
3002  ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
3003                                             _reserve_alignment,
3004                                             large_pages,
3005                                             requested_addr);
3006#else // AARCH64
3007  ReservedSpace metaspace_rs;
3008
3009  // Our compressed klass pointers may fit nicely into the lower 32
3010  // bits.
3011  if ((uint64_t)requested_addr + compressed_class_space_size() < 4*G) {
3012    metaspace_rs = ReservedSpace(compressed_class_space_size(),
3013                                             _reserve_alignment,
3014                                             large_pages,
3015                                             requested_addr);
3016  }
3017
3018  if (!metaspace_rs.is_reserved()) {
3019    // Try to align metaspace so that we can decode a compressed klass
3020    // with a single MOVK instruction.  We can do this iff the
3021    // compressed class base is a multiple of 4G.
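    // (A 4G-aligned base has its low 32 bits clear, so decoding only needs to
    // insert the base's upper bits above the shifted narrow klass value.)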
3022    for (char *a = (char*)align_ptr_up(requested_addr, 4*G);
3023         a < (char*)(1024*G);
3024         a += 4*G) {
3025
3026#if INCLUDE_CDS
3027      if (UseSharedSpaces
3028          && !can_use_cds_with_metaspace_addr(a, cds_base)) {
3029        // We failed to find an aligned base that will reach.  Fall
3030        // back to using our requested addr.
3031        metaspace_rs = ReservedSpace(compressed_class_space_size(),
3032                                     _reserve_alignment,
3033                                     large_pages,
3034                                     requested_addr);
3035        break;
3036      }
3037#endif
3038
3039      metaspace_rs = ReservedSpace(compressed_class_space_size(),
3040                                   _reserve_alignment,
3041                                   large_pages,
3042                                   a);
3043      if (metaspace_rs.is_reserved())
3044        break;
3045    }
3046  }
3047
3048#endif // AARCH64
3049
3050  if (!metaspace_rs.is_reserved()) {
3051#if INCLUDE_CDS
3052    if (UseSharedSpaces) {
3053      size_t increment = align_size_up(1*G, _reserve_alignment);
3054
3055      // Keep trying to allocate the metaspace, increasing the requested_addr
3056      // by 1GB each time, until we reach an address that will no longer allow
3057      // use of CDS with compressed klass pointers.
3058      char *addr = requested_addr;
3059      while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
3060             can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
3061        addr = addr + increment;
3062        metaspace_rs = ReservedSpace(compressed_class_space_size(),
3063                                     _reserve_alignment, large_pages, addr);
3064      }
3065    }
3066#endif
3067    // If no successful allocation then try to allocate the space anywhere.  If
3068    // that fails then OOM doom.  At this point we cannot try allocating the
3069    // metaspace as if UseCompressedClassPointers is off because too much
3070    // initialization has happened that depends on UseCompressedClassPointers.
3071    // So, UseCompressedClassPointers cannot be turned off at this point.
3072    if (!metaspace_rs.is_reserved()) {
3073      metaspace_rs = ReservedSpace(compressed_class_space_size(),
3074                                   _reserve_alignment, large_pages);
3075      if (!metaspace_rs.is_reserved()) {
3076        vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes",
3077                                              compressed_class_space_size()));
3078      }
3079    }
3080  }
3081
3082  // If we got here then the metaspace got allocated.
3083  MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
3084
3085#if INCLUDE_CDS
3086  // Verify that we can use shared spaces.  Otherwise, turn off CDS.
3087  if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
3088    FileMapInfo::stop_sharing_and_unmap(
3089        "Could not allocate metaspace at a compatible address");
3090  }
3091#endif
3092  set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
3093                                  UseSharedSpaces ? (address)cds_base : 0);
3094
3095  initialize_class_space(metaspace_rs);
3096
3097  if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) {
3098    gclog_or_tty->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
3099                            p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
3100    gclog_or_tty->print_cr("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT " Req Addr: " PTR_FORMAT,
3101                           compressed_class_space_size(), p2i(metaspace_rs.base()), p2i(requested_addr));
3102  }
3103}
3104
3105// For UseCompressedClassPointers the class space is reserved above the top of
3106// the Java heap.  The argument passed in is at the base of the compressed space.
3107void Metaspace::initialize_class_space(ReservedSpace rs) {
3108  // The reserved space size may be bigger because of alignment, esp with UseLargePages
3109  assert(rs.size() >= CompressedClassSpaceSize,
3110         err_msg(SIZE_FORMAT " != " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize));
3111  assert(using_class_space(), "Must be using class space");
3112  _class_space_list = new VirtualSpaceList(rs);
3113  _chunk_manager_class = new ChunkManager(SpecializedChunk, ClassSmallChunk, ClassMediumChunk);
3114
3115  if (!_class_space_list->initialization_succeeded()) {
3116    vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
3117  }
3118}
3119
3120#endif
3121
3122void Metaspace::ergo_initialize() {
3123  if (DumpSharedSpaces) {
3124    // Using large pages when dumping the shared archive is currently not implemented.
3125    FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
3126  }
3127
3128  size_t page_size = os::vm_page_size();
3129  if (UseLargePages && UseLargePagesInMetaspace) {
3130    page_size = os::large_page_size();
3131  }
3132
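  // Committing is done in units of the selected page size; reservations must
  // also respect the platform allocation granularity (e.g. 64K on Windows).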
3133  _commit_alignment  = page_size;
3134  _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
3135
3136  // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since this will
3137  // override if MaxMetaspaceSize was set on the command line or not.
3138  // This information is needed later to conform to the specification of the
3139  // java.lang.management.MemoryUsage API.
3140  //
3141  // Ideally, we would be able to set the default value of MaxMetaspaceSize in
3142  // globals.hpp to the aligned value, but this is not possible, since the
3143  // alignment depends on other flags being parsed.
3144  MaxMetaspaceSize = align_size_down_bounded(MaxMetaspaceSize, _reserve_alignment);
3145
3146  if (MetaspaceSize > MaxMetaspaceSize) {
3147    MetaspaceSize = MaxMetaspaceSize;
3148  }
3149
3150  MetaspaceSize = align_size_down_bounded(MetaspaceSize, _commit_alignment);
3151
3152  assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");
3153
3154  if (MetaspaceSize < 256*K) {
3155    vm_exit_during_initialization("Too small initial Metaspace size");
3156  }
3157
3158  MinMetaspaceExpansion = align_size_down_bounded(MinMetaspaceExpansion, _commit_alignment);
3159  MaxMetaspaceExpansion = align_size_down_bounded(MaxMetaspaceExpansion, _commit_alignment);
3160
3161  CompressedClassSpaceSize = align_size_down_bounded(CompressedClassSpaceSize, _reserve_alignment);
3162  set_compressed_class_space_size(CompressedClassSpaceSize);
3163}
3164
3165void Metaspace::global_initialize() {
3166  MetaspaceGC::initialize();
3167
3168  // Initialize the alignment for shared spaces.
3169  int max_alignment = os::vm_allocation_granularity();
3170  size_t cds_total = 0;
3171
3172  MetaspaceShared::set_max_alignment(max_alignment);
3173
3174  if (DumpSharedSpaces) {
3175#if INCLUDE_CDS
3176    MetaspaceShared::estimate_regions_size();
3177
3178    SharedReadOnlySize  = align_size_up(SharedReadOnlySize,  max_alignment);
3179    SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
3180    SharedMiscDataSize  = align_size_up(SharedMiscDataSize,  max_alignment);
3181    SharedMiscCodeSize  = align_size_up(SharedMiscCodeSize,  max_alignment);
3182
3183    // make sure SharedReadOnlySize and SharedReadWriteSize are not less than
3184    // the minimum values.
3185    if (SharedReadOnlySize < MetaspaceShared::min_ro_size) {
3186      report_out_of_shared_space(SharedReadOnly);
3187    }
3188
3189    if (SharedReadWriteSize < MetaspaceShared::min_rw_size) {
3190      report_out_of_shared_space(SharedReadWrite);
3191    }
3192
3193    // The min_misc_data_size and min_misc_code_size estimates are based on
3194    // MetaspaceShared::generate_vtable_methods().
3195    // The minimum size only accounts for the vtable methods. Any size less than the
3196    // minimum required size would cause a VM crash when allocating the vtable methods.
3197    uint min_misc_data_size = align_size_up(
3198      MetaspaceShared::num_virtuals * MetaspaceShared::vtbl_list_size * sizeof(void*), max_alignment);
3199
3200    if (SharedMiscDataSize < min_misc_data_size) {
3201      report_out_of_shared_space(SharedMiscData);
3202    }
3203
3204    uintx min_misc_code_size = align_size_up(
3205      (MetaspaceShared::num_virtuals * MetaspaceShared::vtbl_list_size) *
3206        (sizeof(void*) + MetaspaceShared::vtbl_method_size) + MetaspaceShared::vtbl_common_code_size,
3207          max_alignment);
3208
3209    if (SharedMiscCodeSize < min_misc_code_size) {
3210      report_out_of_shared_space(SharedMiscCode);
3211    }
3212
3213    // Initialize with the sum of the shared space sizes.  The read-only
3214    // and read-write metaspace chunks will be allocated out of this, and the
3215    // remainder is used for the misc code and data chunks.
3216    cds_total = FileMapInfo::shared_spaces_size();
3217    cds_total = align_size_up(cds_total, _reserve_alignment);
3218    _space_list = new VirtualSpaceList(cds_total/wordSize);
3219    _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
3220
3221    if (!_space_list->initialization_succeeded()) {
3222      vm_exit_during_initialization("Unable to dump shared archive.", NULL);
3223    }
3224
3225#ifdef _LP64
3226    if (cds_total + compressed_class_space_size() > UnscaledClassSpaceMax) {
3227      vm_exit_during_initialization("Unable to dump shared archive.",
3228          err_msg("Size of archive (" SIZE_FORMAT ") + compressed class space ("
3229                  SIZE_FORMAT ") == total (" SIZE_FORMAT ") is larger than compressed "
3230                  "klass limit: " UINT64_FORMAT, cds_total, compressed_class_space_size(),
3231                  cds_total + compressed_class_space_size(), UnscaledClassSpaceMax));
3232    }
3233
3234    // Set the compressed klass pointer base so that decoding of these pointers works
3235    // properly when creating the shared archive.
3236    assert(UseCompressedOops && UseCompressedClassPointers,
3237      "UseCompressedOops and UseCompressedClassPointers must be set");
3238    Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
3239    if (TraceMetavirtualspaceAllocation && Verbose) {
3240      gclog_or_tty->print_cr("Setting_narrow_klass_base to Address: " PTR_FORMAT,
3241                             p2i(_space_list->current_virtual_space()->bottom()));
3242    }
3243
3244    Universe::set_narrow_klass_shift(0);
3245#endif // _LP64
3246#endif // INCLUDE_CDS
3247  } else {
3248#if INCLUDE_CDS
3249    // If using shared space, open the file that contains the shared space
3250    // and map in the memory before initializing the rest of metaspace (so
3251    // the addresses don't conflict)
3252    address cds_address = NULL;
3253    if (UseSharedSpaces) {
3254      FileMapInfo* mapinfo = new FileMapInfo();
3255
3256      // Open the shared archive file, read and validate the header. If
3257      // initialization fails, shared spaces [UseSharedSpaces] are
3258      // disabled and the file is closed.
3259      // The shared spaces are also mapped in at this point.
3260      if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
3261        cds_total = FileMapInfo::shared_spaces_size();
3262        cds_address = (address)mapinfo->region_base(0);
3263      } else {
3264        assert(!mapinfo->is_open() && !UseSharedSpaces,
3265               "archive file not closed or shared spaces not disabled.");
3266      }
3267    }
3268#endif // INCLUDE_CDS
3269#ifdef _LP64
3270    // If UseCompressedClassPointers is set then allocate the metaspace area
3271    // above the heap and above the CDS area (if it exists).
3272    if (using_class_space()) {
3273      if (UseSharedSpaces) {
3274#if INCLUDE_CDS
3275        char* cds_end = (char*)(cds_address + cds_total);
3276        cds_end = (char *)align_ptr_up(cds_end, _reserve_alignment);
3277        allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
3278#endif
3279      } else {
3280        char* base = (char*)align_ptr_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
3281        allocate_metaspace_compressed_klass_ptrs(base, 0);
3282      }
3283    }
3284#endif // _LP64
3285
3286    // Initialize these before initializing the VirtualSpaceList
3287    _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
3288    _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
3289    // Make the first class chunk bigger than a medium chunk so it's not put
3290    // on the medium chunk list.  The next chunk will be small and progress
3291    // from there.  This size was determined by running -version.
3292    _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
3293                                       (CompressedClassSpaceSize/BytesPerWord)*2);
3294    _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
3295    // Arbitrarily set the initial virtual space to a multiple
3296    // of the boot class loader size.
3297    size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
3298    word_size = align_size_up(word_size, Metaspace::reserve_alignment_words());
3299
3300    // Initialize the list of virtual spaces.
3301    _space_list = new VirtualSpaceList(word_size);
3302    _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
3303
3304    if (!_space_list->initialization_succeeded()) {
3305      vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
3306    }
3307  }
3308
3309  _tracer = new MetaspaceTracer();
3310}
3311
3312void Metaspace::post_initialize() {
3313  MetaspaceGC::post_initialize();
3314}
3315
3316Metachunk* Metaspace::get_initialization_chunk(MetadataType mdtype,
3317                                               size_t chunk_word_size,
3318                                               size_t chunk_bunch) {
3319  // Get a chunk from the chunk freelist
3320  Metachunk* chunk = get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
3321  if (chunk != NULL) {
3322    return chunk;
3323  }
3324
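  // No suitable chunk in the freelist; carve a new chunk out of the virtual space list.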
3325  return get_space_list(mdtype)->get_new_chunk(chunk_word_size, chunk_word_size, chunk_bunch);
3326}
3327
3328void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
3329
3330  assert(space_list() != NULL,
3331    "Metadata VirtualSpaceList has not been initialized");
3332  assert(chunk_manager_metadata() != NULL,
3333    "Metadata ChunkManager has not been initialized");
3334
3335  _vsm = new SpaceManager(NonClassType, lock);
3336  if (_vsm == NULL) {
3337    return;
3338  }
3339  size_t word_size;
3340  size_t class_word_size;
3341  vsm()->get_initial_chunk_sizes(type, &word_size, &class_word_size);
3342
3343  if (using_class_space()) {
3344    assert(class_space_list() != NULL,
3345      "Class VirtualSpaceList has not been initialized");
3346    assert(chunk_manager_class() != NULL,
3347      "Class ChunkManager has not been initialized");
3348
3349    // Allocate SpaceManager for classes.
3350    _class_vsm = new SpaceManager(ClassType, lock);
3351    if (_class_vsm == NULL) {
3352      return;
3353    }
3354  }
3355
3356  MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3357
3358  // Allocate chunk for metadata objects
3359  Metachunk* new_chunk = get_initialization_chunk(NonClassType,
3360                                                  word_size,
3361                                                  vsm()->medium_chunk_bunch());
3362  // For dumping shared archive, report error if allocation has failed.
3363  if (DumpSharedSpaces && new_chunk == NULL) {
3364    report_insufficient_metaspace(MetaspaceAux::committed_bytes() + word_size * BytesPerWord);
3365  }
3366  assert(!DumpSharedSpaces || new_chunk != NULL, "should have enough space for both chunks");
3367  if (new_chunk != NULL) {
3368    // Add to this manager's list of chunks in use and current_chunk().
3369    vsm()->add_chunk(new_chunk, true);
3370  }
3371
3372  // Allocate chunk for class metadata objects
3373  if (using_class_space()) {
3374    Metachunk* class_chunk = get_initialization_chunk(ClassType,
3375                                                      class_word_size,
3376                                                      class_vsm()->medium_chunk_bunch());
3377    if (class_chunk != NULL) {
3378      class_vsm()->add_chunk(class_chunk, true);
3379    } else {
3380      // For dumping shared archive, report error if allocation has failed.
3381      if (DumpSharedSpaces) {
3382        report_insufficient_metaspace(MetaspaceAux::committed_bytes() + class_word_size * BytesPerWord);
3383      }
3384    }
3385  }
3386
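  // The allocation record list is only maintained while dumping the shared
  // archive (see record_allocation() and record_deallocation()).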
3387  _alloc_record_head = NULL;
3388  _alloc_record_tail = NULL;
3389}
3390
3391size_t Metaspace::align_word_size_up(size_t word_size) {
3392  size_t byte_size = word_size * wordSize;
3393  return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
3394}
3395
3396MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
3397  // DumpSharedSpaces doesn't use the class metadata area (yet).
3398  // Also, don't use class_vsm() unless UseCompressedClassPointers is true.
3399  if (is_class_space_allocation(mdtype)) {
3400    return  class_vsm()->allocate(word_size);
3401  } else {
3402    return  vsm()->allocate(word_size);
3403  }
3404}
3405
3406MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
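  // Compute how much the capacity-until-GC high-water mark has to grow to
  // accommodate this allocation.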
3407  size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
3408  assert(delta_bytes > 0, "Must be");
3409
3410  size_t before = 0;
3411  size_t after = 0;
3412  MetaWord* res;
3413  bool incremented;
3414
3415  // Each thread increments the HWM at most once. Even if the thread fails to increment
3416  // the HWM, an allocation is still attempted. This is because another thread must then
3417  // have incremented the HWM and therefore the allocation might still succeed.
3418  do {
3419    incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before);
3420    res = allocate(word_size, mdtype);
3421  } while (!incremented && res == NULL);
3422
3423  if (incremented) {
3424    tracer()->report_gc_threshold(before, after,
3425                                  MetaspaceGCThresholdUpdater::ExpandAndAllocate);
3426    if (PrintGCDetails && Verbose) {
3427      gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT
3428          " to " SIZE_FORMAT, before, after);
3429    }
3430  }
3431
3432  return res;
3433}
3434
3435// Space allocated in the Metaspace.  This may
3436// be across several metadata virtual spaces.
3437char* Metaspace::bottom() const {
3438  assert(DumpSharedSpaces, "only useful and valid for dumping shared spaces");
3439  return (char*)vsm()->current_chunk()->bottom();
3440}
3441
3442size_t Metaspace::used_words_slow(MetadataType mdtype) const {
3443  if (mdtype == ClassType) {
3444    return using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
3445  } else {
3446    return vsm()->sum_used_in_chunks_in_use();  // includes overhead!
3447  }
3448}
3449
3450size_t Metaspace::free_words_slow(MetadataType mdtype) const {
3451  if (mdtype == ClassType) {
3452    return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
3453  } else {
3454    return vsm()->sum_free_in_chunks_in_use();
3455  }
3456}
3457
3458// Space capacity in the Metaspace.  It includes
3459// space in the list of chunks from which allocations
3460// have been made.  It does not include space in the global
3461// freelist, nor the space still available in the block dictionary,
3462// since that space is already counted in some chunk.
3463size_t Metaspace::capacity_words_slow(MetadataType mdtype) const {
3464  if (mdtype == ClassType) {
3465    return using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0;
3466  } else {
3467    return vsm()->sum_capacity_in_chunks_in_use();
3468  }
3469}
3470
3471size_t Metaspace::used_bytes_slow(MetadataType mdtype) const {
3472  return used_words_slow(mdtype) * BytesPerWord;
3473}
3474
3475size_t Metaspace::capacity_bytes_slow(MetadataType mdtype) const {
3476  return capacity_words_slow(mdtype) * BytesPerWord;
3477}
3478
3479size_t Metaspace::allocated_blocks_bytes() const {
3480  return vsm()->allocated_blocks_bytes() +
3481      (using_class_space() ? class_vsm()->allocated_blocks_bytes() : 0);
3482}
3483
3484size_t Metaspace::allocated_chunks_bytes() const {
3485  return vsm()->allocated_chunks_bytes() +
3486      (using_class_space() ? class_vsm()->allocated_chunks_bytes() : 0);
3487}
3488
3489void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
3490  assert(!SafepointSynchronize::is_at_safepoint()
3491         || Thread::current()->is_VM_thread(), "should be the VM thread");
3492
3493  if (DumpSharedSpaces && PrintSharedSpaces) {
3494    record_deallocation(ptr, vsm()->get_raw_word_size(word_size));
3495  }
3496
3497  MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
3498
3499  if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
3500    // Dark matter.  Too small for dictionary.
3501#ifdef ASSERT
3502    Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
3503#endif
3504    return;
3505  }
3506  if (is_class && using_class_space()) {
3507    class_vsm()->deallocate(ptr, word_size);
3508  } else {
3509    vsm()->deallocate(ptr, word_size);
3510  }
3511}
3512
3513
3514MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
3515                              bool read_only, MetaspaceObj::Type type, TRAPS) {
3516  if (HAS_PENDING_EXCEPTION) {
3517    assert(false, "Should not allocate with exception pending");
3518    return NULL;  // caller does a CHECK_NULL too
3519  }
3520
3521  assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
3522        "ClassLoaderData::the_null_class_loader_data() should have been used.");
3523
3524  // Allocate in metaspaces without taking out a lock, because it deadlocks
3525  // with the SymbolTable_lock.  Dumping is single threaded for now.  We'll have
3526  // to revisit this for application class data sharing.
3527  if (DumpSharedSpaces) {
3528    assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity");
3529    Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace();
3530    MetaWord* result = space->allocate(word_size, NonClassType);
3531    if (result == NULL) {
3532      report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
3533    }
3534    if (PrintSharedSpaces) {
3535      space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));
3536    }
3537
3538    // Zero initialize.
3539    Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0);
3540
3541    return result;
3542  }
3543
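  // Only MetaspaceObj::ClassType allocations are placed in the class metaspace
  // (the compressed class space when enabled); all other metadata goes to the
  // non-class space.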
3544  MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
3545
3546  // Try to allocate metadata.
3547  MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
3548
3549  if (result == NULL) {
3550    tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype);
3551
3552    // Allocation failed.
3553    if (is_init_completed()) {
3554      // Only start a GC if the bootstrapping has completed.
3555
3556      // Try to clean out some memory and retry.
3557      result = Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
3558          loader_data, word_size, mdtype);
3559    }
3560  }
3561
3562  if (result == NULL) {
3563    report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL);
3564  }
3565
3566  // Zero initialize.
3567  Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0);
3568
3569  return result;
3570}
3571
3572size_t Metaspace::class_chunk_size(size_t word_size) {
3573  assert(using_class_space(), "Has to use class space");
3574  return class_vsm()->calc_chunk_size(word_size);
3575}
3576
3577void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
3578  tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);
3579
3580  // If result is still null, we are out of memory.
3581  if (Verbose && TraceMetadataChunkAllocation) {
3582    gclog_or_tty->print_cr("Metaspace allocation failed for size "
3583        SIZE_FORMAT, word_size);
3584    if (loader_data->metaspace_or_null() != NULL) {
3585      loader_data->dump(gclog_or_tty);
3586    }
3587    MetaspaceAux::dump(gclog_or_tty);
3588  }
3589
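  // Attribute the failure to the compressed class space if committing the chunk
  // needed for this request would push committed class space beyond
  // CompressedClassSpaceSize.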
3590  bool out_of_compressed_class_space = false;
3591  if (is_class_space_allocation(mdtype)) {
3592    Metaspace* metaspace = loader_data->metaspace_non_null();
3593    out_of_compressed_class_space =
3594      MetaspaceAux::committed_bytes(Metaspace::ClassType) +
3595      (metaspace->class_chunk_size(word_size) * BytesPerWord) >
3596      CompressedClassSpaceSize;
3597  }
3598
3599  // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
3600  const char* space_string = out_of_compressed_class_space ?
3601    "Compressed class space" : "Metaspace";
3602
3603  report_java_out_of_memory(space_string);
3604
3605  if (JvmtiExport::should_post_resource_exhausted()) {
3606    JvmtiExport::post_resource_exhausted(
3607        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
3608        space_string);
3609  }
3610
3611  if (!is_init_completed()) {
3612    vm_exit_during_initialization("OutOfMemoryError", space_string);
3613  }
3614
3615  if (out_of_compressed_class_space) {
3616    THROW_OOP(Universe::out_of_memory_error_class_metaspace());
3617  } else {
3618    THROW_OOP(Universe::out_of_memory_error_metaspace());
3619  }
3620}
3621
3622const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) {
3623  switch (mdtype) {
3624    case Metaspace::ClassType: return "Class";
3625    case Metaspace::NonClassType: return "Metadata";
3626    default:
3627      assert(false, err_msg("Got bad mdtype: %d", (int) mdtype));
3628      return NULL;
3629  }
3630}
3631
3632void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
3633  assert(DumpSharedSpaces, "sanity");
3634
3635  int byte_size = (int)word_size * HeapWordSize;
3636  AllocRecord *rec = new AllocRecord((address)ptr, type, byte_size);
3637
3638  if (_alloc_record_head == NULL) {
3639    _alloc_record_head = _alloc_record_tail = rec;
3640  } else if (_alloc_record_tail->_ptr + _alloc_record_tail->_byte_size == (address)ptr) {
3641    _alloc_record_tail->_next = rec;
3642    _alloc_record_tail = rec;
3643  } else {
3644    // slow linear search, but this doesn't happen that often, and only when dumping
3645    for (AllocRecord *old = _alloc_record_head; old; old = old->_next) {
3646      if (old->_ptr == ptr) {
3647        assert(old->_type == MetaspaceObj::DeallocatedType, "sanity");
3648        int remain_bytes = old->_byte_size - byte_size;
3649        assert(remain_bytes >= 0, "sanity");
3650        old->_type = type;
3651
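        // Reuse the old record for this allocation; if it was larger, split off
        // the tail as a new record that stays marked as deallocated.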
3652        if (remain_bytes == 0) {
3653          delete(rec);
3654        } else {
3655          address remain_ptr = address(ptr) + byte_size;
3656          rec->_ptr = remain_ptr;
3657          rec->_byte_size = remain_bytes;
3658          rec->_type = MetaspaceObj::DeallocatedType;
3659          rec->_next = old->_next;
3660          old->_byte_size = byte_size;
3661          old->_next = rec;
3662        }
3663        return;
3664      }
3665    }
3666    assert(0, "reallocating a freed pointer that was not recorded");
3667  }
3668}
3669
3670void Metaspace::record_deallocation(void* ptr, size_t word_size) {
3671  assert(DumpSharedSpaces, "sanity");
3672
3673  for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
3674    if (rec->_ptr == ptr) {
3675      assert(rec->_byte_size == (int)word_size * HeapWordSize, "sanity");
3676      rec->_type = MetaspaceObj::DeallocatedType;
3677      return;
3678    }
3679  }
3680
3681  assert(0, "deallocating a pointer that was not recorded");
3682}
3683
3684void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) {
3685  assert(DumpSharedSpaces, "unimplemented for !DumpSharedSpaces");
3686
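  // Walk the allocation records in address order; gaps between records (and
  // after the last record) are reported as UnknownType.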
3687  address last_addr = (address)bottom();
3688
3689  for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
3690    address ptr = rec->_ptr;
3691    if (last_addr < ptr) {
3692      closure->doit(last_addr, MetaspaceObj::UnknownType, ptr - last_addr);
3693    }
3694    closure->doit(ptr, rec->_type, rec->_byte_size);
3695    last_addr = ptr + rec->_byte_size;
3696  }
3697
3698  address top = ((address)bottom()) + used_bytes_slow(Metaspace::NonClassType);
3699  if (last_addr < top) {
3700    closure->doit(last_addr, MetaspaceObj::UnknownType, top - last_addr);
3701  }
3702}
3703
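// Release virtual space nodes that contain no in-use chunks, removing their
// free chunks from the chunk manager.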
3704void Metaspace::purge(MetadataType mdtype) {
3705  get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
3706}
3707
3708void Metaspace::purge() {
3709  MutexLockerEx cl(SpaceManager::expand_lock(),
3710                   Mutex::_no_safepoint_check_flag);
3711  purge(NonClassType);
3712  if (using_class_space()) {
3713    purge(ClassType);
3714  }
3715}
3716
3717void Metaspace::print_on(outputStream* out) const {
3718  // Print both class virtual space counts and metaspace.
3719  if (Verbose) {
3720    vsm()->print_on(out);
3721    if (using_class_space()) {
3722      class_vsm()->print_on(out);
3723    }
3724  }
3725}
3726
3727bool Metaspace::contains(const void* ptr) {
3728  if (UseSharedSpaces && MetaspaceShared::is_in_shared_space(ptr)) {
3729    return true;
3730  }
3731
3732  if (using_class_space() && get_space_list(ClassType)->contains(ptr)) {
3733     return true;
3734  }
3735
3736  return get_space_list(NonClassType)->contains(ptr);
3737}
3738
3739void Metaspace::verify() {
3740  vsm()->verify();
3741  if (using_class_space()) {
3742    class_vsm()->verify();
3743  }
3744}
3745
3746void Metaspace::dump(outputStream* const out) const {
3747  out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, p2i(vsm()));
3748  vsm()->dump(out);
3749  if (using_class_space()) {
3750    out->print_cr("\nClass space manager: " INTPTR_FORMAT, p2i(class_vsm()));
3751    class_vsm()->dump(out);
3752  }
3753}
3754
3755/////////////// Unit tests ///////////////
3756
3757#ifndef PRODUCT
3758
3759class TestMetaspaceAuxTest : AllStatic {
3760 public:
3761  static void test_reserved() {
3762    size_t reserved = MetaspaceAux::reserved_bytes();
3763
3764    assert(reserved > 0, "assert");
3765
3766    size_t committed  = MetaspaceAux::committed_bytes();
3767    assert(committed <= reserved, "assert");
3768
3769    size_t reserved_metadata = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
3770    assert(reserved_metadata > 0, "assert");
3771    assert(reserved_metadata <= reserved, "assert");
3772
3773    if (UseCompressedClassPointers) {
3774      size_t reserved_class    = MetaspaceAux::reserved_bytes(Metaspace::ClassType);
3775      assert(reserved_class > 0, "assert");
3776      assert(reserved_class < reserved, "assert");
3777    }
3778  }
3779
3780  static void test_committed() {
3781    size_t committed = MetaspaceAux::committed_bytes();
3782
3783    assert(committed > 0, "assert");
3784
3785    size_t reserved  = MetaspaceAux::reserved_bytes();
3786    assert(committed <= reserved, "assert");
3787
3788    size_t committed_metadata = MetaspaceAux::committed_bytes(Metaspace::NonClassType);
3789    assert(committed_metadata > 0, "assert");
3790    assert(committed_metadata <= committed, "assert");
3791
3792    if (UseCompressedClassPointers) {
3793      size_t committed_class    = MetaspaceAux::committed_bytes(Metaspace::ClassType);
3794      assert(committed_class > 0, "assert");
3795      assert(committed_class < committed, "assert");
3796    }
3797  }
3798
3799  static void test_virtual_space_list_large_chunk() {
3800    VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
3801    MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3802    // Use a size larger than VirtualSpaceSize (256k) and add one page so the size
3803    // is _not_ vm_allocation_granularity aligned on Windows.
3804    size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
3805    large_size += (os::vm_page_size()/BytesPerWord);
3806    vs_list->get_new_chunk(large_size, large_size, 0);
3807  }
3808
3809  static void test() {
3810    test_reserved();
3811    test_committed();
3812    test_virtual_space_list_large_chunk();
3813  }
3814};
3815
3816void TestMetaspaceAux_test() {
3817  TestMetaspaceAuxTest::test();
3818}
3819
3820class TestVirtualSpaceNodeTest {
3821  static void chunk_up(size_t words_left, size_t& num_medium_chunks,
3822                                          size_t& num_small_chunks,
3823                                          size_t& num_specialized_chunks) {
3824    num_medium_chunks = words_left / MediumChunk;
3825    words_left = words_left % MediumChunk;
3826
3827    num_small_chunks = words_left / SmallChunk;
3828    words_left = words_left % SmallChunk;
3829    // how many specialized chunks can we get?
3830    num_specialized_chunks = words_left / SpecializedChunk;
3831    assert(words_left % SpecializedChunk == 0, "should be nothing left");
3832  }
3833
3834 public:
3835  static void test() {
3836    MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3837    const size_t vsn_test_size_words = MediumChunk  * 4;
3838    const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord;
3839
3840    // The chunk sizes must be multiples of eachother, or this will fail
3841    STATIC_ASSERT(MediumChunk % SmallChunk == 0);
3842    STATIC_ASSERT(SmallChunk % SpecializedChunk == 0);
3843
3844    { // No committed memory in VSN
3845      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3846      VirtualSpaceNode vsn(vsn_test_size_bytes);
3847      vsn.initialize();
3848      vsn.retire(&cm);
3849      assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN");
3850    }
3851
3852    { // All of VSN is committed, half is used by chunks
3853      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3854      VirtualSpaceNode vsn(vsn_test_size_bytes);
3855      vsn.initialize();
3856      vsn.expand_by(vsn_test_size_words, vsn_test_size_words);
3857      vsn.get_chunk_vs(MediumChunk);
3858      vsn.get_chunk_vs(MediumChunk);
3859      vsn.retire(&cm);
3860      assert(cm.sum_free_chunks_count() == 2, "should have been memory left for 2 medium chunks");
3861      assert(cm.sum_free_chunks() == 2*MediumChunk, "sizes should add up");
3862    }
3863
3864    const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord;
3865    // This doesn't work for systems with vm_page_size >= 16K.
3866    if (page_chunks < MediumChunk) {
3867      // 4 pages of VSN is committed, some is used by chunks
3868      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3869      VirtualSpaceNode vsn(vsn_test_size_bytes);
3870
3871      vsn.initialize();
3872      vsn.expand_by(page_chunks, page_chunks);
3873      vsn.get_chunk_vs(SmallChunk);
3874      vsn.get_chunk_vs(SpecializedChunk);
3875      vsn.retire(&cm);
3876
3877      // committed - used = words left to retire
3878      const size_t words_left = page_chunks - SmallChunk - SpecializedChunk;
3879
3880      size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
3881      chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
3882
3883      assert(num_medium_chunks == 0, "should not get any medium chunks");
3884      assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
3885      assert(cm.sum_free_chunks() == words_left, "sizes should add up");
3886    }
3887
3888    { // Half of VSN is committed, a humongous chunk is used
3889      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3890      VirtualSpaceNode vsn(vsn_test_size_bytes);
3891      vsn.initialize();
3892      vsn.expand_by(MediumChunk * 2, MediumChunk * 2);
3893      vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk
3894      vsn.retire(&cm);
3895
3896      const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk);
3897      size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
3898      chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
3899
3900      assert(num_medium_chunks == 0, "should not get any medium chunks");
3901      assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
3902      assert(cm.sum_free_chunks() == words_left, "sizes should add up");
3903    }
3904
3905  }
3906
3907#define assert_is_available_positive(word_size) \
3908  assert(vsn.is_available(word_size), \
3909    err_msg(#word_size ": " PTR_FORMAT " bytes were not available in " \
3910            "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
3911            (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end())));
3912
3913#define assert_is_available_negative(word_size) \
3914  assert(!vsn.is_available(word_size), \
3915    err_msg(#word_size ": " PTR_FORMAT " bytes should not be available in " \
3916            "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
3917            (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end())));
3918
3919  static void test_is_available_positive() {
3920    // Reserve some memory.
3921    VirtualSpaceNode vsn(os::vm_allocation_granularity());
3922    assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
3923
3924    // Commit some memory.
3925    size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
3926    bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
3927    assert(expanded, "Failed to commit");
3928
3929    // Check that is_available accepts the committed size.
3930    assert_is_available_positive(commit_word_size);
3931
3932    // Check that is_available accepts half the committed size.
3933    size_t expand_word_size = commit_word_size / 2;
3934    assert_is_available_positive(expand_word_size);
3935  }
3936
3937  static void test_is_available_negative() {
3938    // Reserve some memory.
3939    VirtualSpaceNode vsn(os::vm_allocation_granularity());
3940    assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
3941
3942    // Commit some memory.
3943    size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
3944    bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
3945    assert(expanded, "Failed to commit");
3946
3947    // Check that is_available doesn't accept a too large size.
3948    size_t two_times_commit_word_size = commit_word_size * 2;
3949    assert_is_available_negative(two_times_commit_word_size);
3950  }
3951
3952  static void test_is_available_overflow() {
3953    // Reserve some memory.
3954    VirtualSpaceNode vsn(os::vm_allocation_granularity());
3955    assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
3956
3957    // Commit some memory.
3958    size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
3959    bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
3960    assert(expanded, "Failed to commit");
3961
3962    // Calculate a size that will overflow the virtual space size.
3963    void* virtual_space_max = (void*)(uintptr_t)-1;
3964    size_t bottom_to_max = pointer_delta(virtual_space_max, vsn.bottom(), 1);
3965    size_t overflow_size = bottom_to_max + BytesPerWord;
3966    size_t overflow_word_size = overflow_size / BytesPerWord;
3967
3968    // Check that is_available can handle the overflow.
3969    assert_is_available_negative(overflow_word_size);
3970  }
3971
3972  static void test_is_available() {
3973    TestVirtualSpaceNodeTest::test_is_available_positive();
3974    TestVirtualSpaceNodeTest::test_is_available_negative();
3975    TestVirtualSpaceNodeTest::test_is_available_overflow();
3976  }
3977};
3978
3979void TestVirtualSpaceNode_test() {
3980  TestVirtualSpaceNodeTest::test();
3981  TestVirtualSpaceNodeTest::test_is_available();
3982}
3983#endif
3984