1/*
2 * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25#include "precompiled.hpp"
26#include "classfile/classListParser.hpp"
27#include "classfile/classLoaderExt.hpp"
28#include "classfile/dictionary.hpp"
29#include "classfile/loaderConstraints.hpp"
30#include "classfile/placeholders.hpp"
31#include "classfile/sharedClassUtil.hpp"
32#include "classfile/symbolTable.hpp"
33#include "classfile/stringTable.hpp"
34#include "classfile/systemDictionary.hpp"
35#include "classfile/systemDictionaryShared.hpp"
36#include "code/codeCache.hpp"
37#if INCLUDE_ALL_GCS
38#include "gc/g1/g1Allocator.inline.hpp"
39#include "gc/g1/g1CollectedHeap.hpp"
40#include "gc/g1/g1SATBCardTableModRefBS.hpp"
41#endif
42#include "gc/shared/gcLocker.hpp"
43#include "interpreter/bytecodeStream.hpp"
44#include "interpreter/bytecodes.hpp"
45#include "logging/log.hpp"
46#include "logging/logMessage.hpp"
47#include "memory/filemap.hpp"
48#include "memory/metaspace.hpp"
49#include "memory/metaspaceShared.hpp"
50#include "memory/resourceArea.hpp"
51#include "oops/instanceClassLoaderKlass.hpp"
52#include "oops/instanceMirrorKlass.hpp"
53#include "oops/instanceRefKlass.hpp"
54#include "oops/objArrayKlass.hpp"
55#include "oops/objArrayOop.hpp"
56#include "oops/oop.inline.hpp"
57#include "oops/typeArrayKlass.hpp"
58#include "prims/jvm.h"
59#include "prims/jvmtiRedefineClasses.hpp"
60#include "runtime/timerTrace.hpp"
61#include "runtime/os.hpp"
62#include "runtime/signature.hpp"
63#include "runtime/vmThread.hpp"
64#include "runtime/vm_operations.hpp"
65#include "utilities/align.hpp"
66#include "utilities/defaultStream.hpp"
67#include "utilities/hashtable.inline.hpp"
68#include "memory/metaspaceClosure.hpp"
69
70ReservedSpace MetaspaceShared::_shared_rs;
71VirtualSpace MetaspaceShared::_shared_vs;
72MetaspaceSharedStats MetaspaceShared::_stats;
73bool MetaspaceShared::_has_error_classes;
74bool MetaspaceShared::_archive_loading_failed = false;
75bool MetaspaceShared::_remapped_readwrite = false;
76bool MetaspaceShared::_open_archive_heap_region_mapped = false;
77address MetaspaceShared::_cds_i2i_entry_code_buffers = NULL;
78size_t MetaspaceShared::_cds_i2i_entry_code_buffers_size = 0;
79size_t MetaspaceShared::_core_spaces_size = 0;
80
81// The CDS archive is divided into the following regions:
82//     mc  - misc code (the method entry trampolines)
83//     rw  - read-write metadata
84//     ro  - read-only metadata and read-only tables
85//     md  - misc data (the c++ vtables)
86//     od  - optional data (original class files)
87//
//     s0  - shared strings (closed archive heap space) #0
//     s1  - shared strings (closed archive heap space) #1 (may be empty)
90//     oa0 - open archive heap space #0
91//     oa1 - open archive heap space #1 (may be empty)
92//
// The mc, rw, ro, md and od regions are linearly allocated, starting from
// SharedBaseAddress, in the order of mc->rw->ro->md->od. The sizes of these 5 regions
// are page-aligned, and there's no gap between any consecutive regions.
96//
97// These 5 regions are populated in the following steps:
98// [1] All classes are loaded in MetaspaceShared::preload_classes(). All metadata are
99//     temporarily allocated outside of the shared regions. Only the method entry
100//     trampolines are written into the mc region.
101// [2] ArchiveCompactor copies RW metadata into the rw region.
102// [3] ArchiveCompactor copies RO metadata into the ro region.
103// [4] SymbolTable, StringTable, SystemDictionary, and a few other read-only data
104//     are copied into the ro region as read-only tables.
105// [5] C++ vtables are copied into the md region.
106// [6] Original class files are copied into the od region.
107//
// The s0/s1 and oa0/oa1 regions are populated inside VM_PopulateDumpSharedSpace::dump_java_heap_objects.
109// Their layout is independent of the other 5 regions.
110
111class DumpRegion {
112private:
113  const char* _name;
114  char* _base;
115  char* _top;
116  char* _end;
117  bool _is_packed;
118
119  char* expand_top_to(char* newtop) {
120    assert(is_allocatable(), "must be initialized and not packed");
121    assert(newtop >= _top, "must not grow backwards");
122    if (newtop > _end) {
123      MetaspaceShared::report_out_of_space(_name, newtop - _top);
124      ShouldNotReachHere();
125    }
126    MetaspaceShared::commit_shared_space_to(newtop);
127    _top = newtop;
128    return _top;
129  }
130
131public:
132  DumpRegion(const char* name) : _name(name), _base(NULL), _top(NULL), _end(NULL), _is_packed(false) {}
133
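  // Allocate num_bytes of zero-filled storage from this region, aligning the current
  // top to 'alignment' and committing more memory if necessary.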
134  char* allocate(size_t num_bytes, size_t alignment=BytesPerWord) {
135    char* p = (char*)align_up(_top, alignment);
136    char* newtop = p + align_up(num_bytes, alignment);
137    expand_top_to(newtop);
138    memset(p, 0, newtop - p);
139    return p;
140  }
141
142  void append_intptr_t(intptr_t n) {
143    assert(is_aligned(_top, sizeof(intptr_t)), "bad alignment");
144    intptr_t *p = (intptr_t*)_top;
145    char* newtop = _top + sizeof(intptr_t);
146    expand_top_to(newtop);
147    *p = n;
148  }
149
150  char* base()      const { return _base;        }
151  char* top()       const { return _top;         }
152  char* end()       const { return _end;         }
153  size_t reserved() const { return _end - _base; }
154  size_t used()     const { return _top - _base; }
155  bool is_packed()  const { return _is_packed;   }
156  bool is_allocatable() const {
157    return !is_packed() && _base != NULL;
158  }
159
160  double perc(size_t used, size_t total) const {
161    if (total == 0) {
162      total = 1;
163    }
164    return used / double(total) * 100.0;
165  }
166
167  void print(size_t total_bytes) const {
168    tty->print_cr("%s space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used] at " INTPTR_FORMAT,
169                  _name, used(), perc(used(), total_bytes), reserved(), perc(used(), reserved()), p2i(_base));
170  }
171  void print_out_of_space_msg(const char* failing_region, size_t needed_bytes) {
172    tty->print("[%-8s] " PTR_FORMAT " - " PTR_FORMAT " capacity =%9d, allocated =%9d",
173               _name, p2i(_base), p2i(_top), int(_end - _base), int(_top - _base));
174    if (strcmp(_name, failing_region) == 0) {
175      tty->print_cr(" required = %d", int(needed_bytes));
176    } else {
177      tty->cr();
178    }
179  }
180
181  void init(const ReservedSpace* rs) {
182    _base = _top = rs->base();
183    _end = rs->end();
184  }
185  void init(char* b, char* t, char* e) {
186    _base = b;
187    _top = t;
188    _end = e;
189  }
190
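  // Seal this region: set _end to _top rounded up to the reserve alignment and mark
  // the region as packed. If 'next' is given, it begins where this region ends.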
191  void pack(DumpRegion* next = NULL) {
192    assert(!is_packed(), "sanity");
193    _end = (char*)align_up(_top, Metaspace::reserve_alignment());
194    _is_packed = true;
195    if (next != NULL) {
196      next->_base = next->_top = this->_end;
197      next->_end = MetaspaceShared::shared_rs()->end();
198    }
199  }
200  bool contains(char* p) {
201    return base() <= p && p < top();
202  }
203};
204
205
206DumpRegion _mc_region("mc"), _ro_region("ro"), _rw_region("rw"), _md_region("md"), _od_region("od");
207size_t _total_string_region_size = 0, _total_open_archive_region_size = 0;
208
209char* MetaspaceShared::misc_code_space_alloc(size_t num_bytes) {
210  return _mc_region.allocate(num_bytes);
211}
212
213char* MetaspaceShared::read_only_space_alloc(size_t num_bytes) {
214  return _ro_region.allocate(num_bytes);
215}
216
217void MetaspaceShared::initialize_shared_rs() {
218  const size_t reserve_alignment = Metaspace::reserve_alignment();
219  bool large_pages = false; // No large pages when dumping the CDS archive.
220  char* shared_base = (char*)align_up((char*)SharedBaseAddress, reserve_alignment);
221
222#ifdef _LP64
223  // On 64-bit VM, the heap and class space layout will be the same as if
224  // you're running in -Xshare:on mode:
225  //
226  //                         +-- SharedBaseAddress (default = 0x800000000)
227  //                         v
228  // +-..---------+----+ ... +----+----+----+----+----+---------------+
229  // |    Heap    | ST |     | MC | RW | RO | MD | OD | class space   |
230  // +-..---------+----+ ... +----+----+----+----+----+---------------+
231  // |<--MaxHeapSize->|     |<-- UnscaledClassSpaceMax = 4GB ------->|
232  //
233  const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
234  const size_t cds_total = align_down(UnscaledClassSpaceMax, reserve_alignment);
235#else
236  // We don't support archives larger than 256MB on 32-bit due to limited virtual address space.
237  size_t cds_total = align_down(256*M, reserve_alignment);
238#endif
239
240  // First try to reserve the space at the specified SharedBaseAddress.
241  _shared_rs = ReservedSpace(cds_total, reserve_alignment, large_pages, shared_base);
242  if (_shared_rs.is_reserved()) {
243    assert(shared_base == 0 || _shared_rs.base() == shared_base, "should match");
244  } else {
    // Get an mmap region anywhere if reserving at SharedBaseAddress fails.
246    _shared_rs = ReservedSpace(cds_total, reserve_alignment, large_pages);
247  }
248  if (!_shared_rs.is_reserved()) {
249    vm_exit_during_initialization("Unable to reserve memory for shared space",
250                                  err_msg(SIZE_FORMAT " bytes.", cds_total));
251  }
252
253#ifdef _LP64
254  // During dump time, we allocate 4GB (UnscaledClassSpaceMax) of space and split it up:
255  // + The upper 1 GB is used as the "temporary compressed class space" -- preload_classes()
256  //   will store Klasses into this space.
257  // + The lower 3 GB is used for the archive -- when preload_classes() is done,
258  //   ArchiveCompactor will copy the class metadata into this space, first the RW parts,
259  //   then the RO parts.
260
261  assert(UseCompressedOops && UseCompressedClassPointers,
262      "UseCompressedOops and UseCompressedClassPointers must be set");
263
264  size_t max_archive_size = align_down(cds_total * 3 / 4, reserve_alignment);
265  ReservedSpace tmp_class_space = _shared_rs.last_part(max_archive_size);
266  CompressedClassSpaceSize = align_down(tmp_class_space.size(), reserve_alignment);
267  _shared_rs = _shared_rs.first_part(max_archive_size);
268
  // Set up compressed class pointers.
270  Universe::set_narrow_klass_base((address)_shared_rs.base());
271  if (UseAOT || cds_total > UnscaledClassSpaceMax) {
272    // AOT forces narrow_klass_shift=LogKlassAlignmentInBytes
273    Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
274  } else {
275    Universe::set_narrow_klass_shift(0);
276  }
277
278  Metaspace::initialize_class_space(tmp_class_space);
279  tty->print_cr("narrow_klass_base = " PTR_FORMAT ", narrow_klass_shift = %d",
280                p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
281
282  tty->print_cr("Allocated temporary class space: " SIZE_FORMAT " bytes at " PTR_FORMAT,
283                CompressedClassSpaceSize, p2i(tmp_class_space.base()));
284#endif
285
286  // Start with 0 committed bytes. The memory will be committed as needed by
287  // MetaspaceShared::commit_shared_space_to().
288  if (!_shared_vs.initialize(_shared_rs, 0)) {
289    vm_exit_during_initialization("Unable to allocate memory for shared space");
290  }
291
292  _mc_region.init(&_shared_rs);
293  tty->print_cr("Allocated shared space: " SIZE_FORMAT " bytes at " PTR_FORMAT,
294                _shared_rs.size(), p2i(_shared_rs.base()));
295}
296
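// Commit more of the reserved shared space so that the range [base, newtop) is backed
// by committed memory. Commits in chunks of at least 1 MB (preferred_bytes) to reduce
// the number of expansion calls.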
297void MetaspaceShared::commit_shared_space_to(char* newtop) {
298  assert(DumpSharedSpaces, "dump-time only");
299  char* base = _shared_rs.base();
300  size_t need_committed_size = newtop - base;
301  size_t has_committed_size = _shared_vs.committed_size();
302  if (need_committed_size < has_committed_size) {
303    return;
304  }
305
306  size_t min_bytes = need_committed_size - has_committed_size;
307  size_t preferred_bytes = 1 * M;
308  size_t uncommitted = _shared_vs.reserved_size() - has_committed_size;
309
310  size_t commit = MAX2(min_bytes, preferred_bytes);
311  assert(commit <= uncommitted, "sanity");
312
313  bool result = _shared_vs.expand_by(commit, false);
314  if (!result) {
315    vm_exit_during_initialization(err_msg("Failed to expand shared space to " SIZE_FORMAT " bytes",
316                                          need_committed_size));
317  }
318
319  log_info(cds)("Expanding shared spaces by " SIZE_FORMAT_W(7) " bytes [total " SIZE_FORMAT_W(9)  " bytes ending at %p]",
320                commit, _shared_vs.actual_committed_size(), _shared_vs.high());
321}
322
323// Read/write a data stream for restoring/preserving metadata pointers and
324// miscellaneous data from/to the shared archive file.
325
326void MetaspaceShared::serialize(SerializeClosure* soc) {
327  int tag = 0;
328  soc->do_tag(--tag);
329
330  // Verify the sizes of various metadata in the system.
331  soc->do_tag(sizeof(Method));
332  soc->do_tag(sizeof(ConstMethod));
333  soc->do_tag(arrayOopDesc::base_offset_in_bytes(T_BYTE));
334  soc->do_tag(sizeof(ConstantPool));
335  soc->do_tag(sizeof(ConstantPoolCache));
336  soc->do_tag(objArrayOopDesc::base_offset_in_bytes());
337  soc->do_tag(typeArrayOopDesc::base_offset_in_bytes(T_BYTE));
338  soc->do_tag(sizeof(Symbol));
339
340  // Dump/restore miscellaneous metadata.
341  Universe::serialize(soc, true);
342  soc->do_tag(--tag);
343
344  // Dump/restore references to commonly used names and signatures.
345  vmSymbols::serialize(soc);
346  soc->do_tag(--tag);
347
348  // Dump/restore the symbol and string tables
349  SymbolTable::serialize(soc);
350  StringTable::serialize(soc);
351  soc->do_tag(--tag);
352
353  soc->do_tag(666);
354}
355
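// Return the buffer (in the mc region) that holds the i2i entry trampolines for
// archived methods. At dump time the buffer is allocated on first use; at run time
// it must already have been initialized from the mapped archive.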
356address MetaspaceShared::cds_i2i_entry_code_buffers(size_t total_size) {
357  if (DumpSharedSpaces) {
358    if (_cds_i2i_entry_code_buffers == NULL) {
359      _cds_i2i_entry_code_buffers = (address)misc_code_space_alloc(total_size);
360      _cds_i2i_entry_code_buffers_size = total_size;
361    }
362  } else if (UseSharedSpaces) {
    assert(_cds_i2i_entry_code_buffers != NULL, "must already be initialized");
364  } else {
365    return NULL;
366  }
367
368  assert(_cds_i2i_entry_code_buffers_size == total_size, "must not change");
369  return _cds_i2i_entry_code_buffers;
370}
371
372// CDS code for dumping shared archive.
373
374// Global object for holding classes that have been loaded.  Since this
375// is run at a safepoint just before exit, this is the entire set of classes.
376static GrowableArray<Klass*>* _global_klass_objects;
377
378static void collect_array_classes(Klass* k) {
379  _global_klass_objects->append_if_missing(k);
380  if (k->is_array_klass()) {
381    // Add in the array classes too
382    ArrayKlass* ak = ArrayKlass::cast(k);
383    Klass* h = ak->higher_dimension();
384    if (h != NULL) {
385      h->array_klasses_do(collect_array_classes);
386    }
387  }
388}
389
390class CollectClassesClosure : public KlassClosure {
391  void do_klass(Klass* k) {
392    if (!(k->is_instance_klass() && InstanceKlass::cast(k)->is_in_error_state())) {
393      _global_klass_objects->append_if_missing(k);
394    }
395    if (k->is_array_klass()) {
396      // Add in the array classes too
397      ArrayKlass* ak = ArrayKlass::cast(k);
398      Klass* h = ak->higher_dimension();
399      if (h != NULL) {
400        h->array_klasses_do(collect_array_classes);
401      }
402    }
403  }
404};
405
406static void remove_unshareable_in_classes() {
407  for (int i = 0; i < _global_klass_objects->length(); i++) {
408    Klass* k = _global_klass_objects->at(i);
409    if (!k->is_objArray_klass()) {
410      // InstanceKlass and TypeArrayKlass will in turn call remove_unshareable_info
411      // on their array classes.
412      assert(k->is_instance_klass() || k->is_typeArray_klass(), "must be");
413      k->remove_unshareable_info();
414    }
415  }
416}
417
418static void remove_java_mirror_in_classes() {
419  for (int i = 0; i < _global_klass_objects->length(); i++) {
420    Klass* k = _global_klass_objects->at(i);
421    if (!k->is_objArray_klass()) {
      // InstanceKlass and TypeArrayKlass will in turn call remove_java_mirror
      // on their array classes.
424      assert(k->is_instance_klass() || k->is_typeArray_klass(), "must be");
425      k->remove_java_mirror();
426    }
427  }
428}
429
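// Rewrite selected bytecodes of a method into their _nofast variants so that the
// interpreter will not rewrite them at run time (see the comment above
// rewrite_nofast_bytecodes_and_calculate_fingerprints below).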
430static void rewrite_nofast_bytecode(Method* method) {
431  BytecodeStream bcs(method);
432  while (!bcs.is_last_bytecode()) {
433    Bytecodes::Code opcode = bcs.next();
434    switch (opcode) {
435    case Bytecodes::_getfield:      *bcs.bcp() = Bytecodes::_nofast_getfield;      break;
436    case Bytecodes::_putfield:      *bcs.bcp() = Bytecodes::_nofast_putfield;      break;
437    case Bytecodes::_aload_0:       *bcs.bcp() = Bytecodes::_nofast_aload_0;       break;
438    case Bytecodes::_iload: {
439      if (!bcs.is_wide()) {
440        *bcs.bcp() = Bytecodes::_nofast_iload;
441      }
442      break;
443    }
444    default: break;
445    }
446  }
447}
448
449// Walk all methods in the class list to ensure that they won't be modified at
450// run time. This includes:
451// [1] Rewrite all bytecodes as needed, so that the ConstMethod* will not be modified
452//     at run time by RewriteBytecodes/RewriteFrequentPairs
453// [2] Assign a fingerprint, so one doesn't need to be assigned at run-time.
454static void rewrite_nofast_bytecodes_and_calculate_fingerprints() {
455  for (int i = 0; i < _global_klass_objects->length(); i++) {
456    Klass* k = _global_klass_objects->at(i);
457    if (k->is_instance_klass()) {
458      InstanceKlass* ik = InstanceKlass::cast(k);
459      for (int i = 0; i < ik->methods()->length(); i++) {
460        Method* m = ik->methods()->at(i);
461        rewrite_nofast_bytecode(m);
462        Fingerprinter fp(m);
        // The side effect of this call sets the method's fingerprint field.
464        fp.fingerprint();
465      }
466    }
467  }
468}
469
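// Copy each InstanceKlass's JVMTI-cached class file data (the original class file
// bytes), if present, into the od region so that it becomes part of the archive.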
470static void relocate_cached_class_file() {
471  for (int i = 0; i < _global_klass_objects->length(); i++) {
472    Klass* k = _global_klass_objects->at(i);
473    if (k->is_instance_klass()) {
474      InstanceKlass* ik = InstanceKlass::cast(k);
475      JvmtiCachedClassFileData* p = ik->get_archived_class_data();
476      if (p != NULL) {
477        int size = offset_of(JvmtiCachedClassFileData, data) + p->length;
478        JvmtiCachedClassFileData* q = (JvmtiCachedClassFileData*)_od_region.allocate(size);
479        q->length = p->length;
480        memcpy(q->data, p->data, p->length);
481        ik->set_archived_class_data(q);
482      }
483    }
484  }
485}
486
487NOT_PRODUCT(
488static void assert_not_anonymous_class(InstanceKlass* k) {
489  assert(!(k->is_anonymous()), "cannot archive anonymous classes");
490}
491
492// Anonymous classes are not stored inside any dictionaries. They are created by
493// SystemDictionary::parse_stream() with a non-null host_klass.
static void assert_no_anonymous_classes_in_dictionaries() {
495  ClassLoaderDataGraph::dictionary_classes_do(assert_not_anonymous_class);
496})
497
498// Objects of the Metadata types (such as Klass and ConstantPool) have C++ vtables.
499// (In GCC this is the field <Type>::_vptr, i.e., first word in the object.)
500//
501// Addresses of the vtables and the methods may be different across JVM runs,
502// if libjvm.so is dynamically loaded at a different base address.
503//
504// To ensure that the Metadata objects in the CDS archive always have the correct vtable:
505//
506// + at dump time:  we redirect the _vptr to point to our own vtables inside
507//                  the CDS image
508// + at run time:   we clone the actual contents of the vtables from libjvm.so
509//                  into our own tables.
510
// Currently, the archive contains only the following types of objects that have C++ vtables.
512#define CPP_VTABLE_PATCH_TYPES_DO(f) \
513  f(ConstantPool) \
514  f(InstanceKlass) \
515  f(InstanceClassLoaderKlass) \
516  f(InstanceMirrorKlass) \
517  f(InstanceRefKlass) \
518  f(Method) \
519  f(ObjArrayKlass) \
520  f(TypeArrayKlass)
521
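// Archived form of one cloned C++ vtable: the number of slots, followed by the
// slot contents themselves.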
522class CppVtableInfo {
523  intptr_t _vtable_size;
524  intptr_t _cloned_vtable[1];
525public:
526  static int num_slots(int vtable_size) {
527    return 1 + vtable_size; // Need to add the space occupied by _vtable_size;
528  }
529  int vtable_size()           { return int(uintx(_vtable_size)); }
530  void set_vtable_size(int n) { _vtable_size = intptr_t(n); }
531  intptr_t* cloned_vtable()   { return &_cloned_vtable[0]; }
532  void zero()                 { memset(_cloned_vtable, 0, sizeof(intptr_t) * vtable_size()); }
  // Returns the total size, in bytes, of a CppVtableInfo with the given number of vtable slots.
534  static size_t byte_size(int vtable_size) {
535    CppVtableInfo i;
536    return pointer_delta(&i._cloned_vtable[vtable_size], &i, sizeof(u1));
537  }
538};
539
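// CppVtableCloner<T> allocates space for, clones, and patches in the archived copy
// of T's C++ vtable.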
540template <class T> class CppVtableCloner : public T {
541  static intptr_t* vtable_of(Metadata& m) {
542    return *((intptr_t**)&m);
543  }
544  static CppVtableInfo* _info;
545
546  static int get_vtable_length(const char* name);
547
548public:
549  // Allocate and initialize the C++ vtable, starting from top, but do not go past end.
550  static intptr_t* allocate(const char* name);
551
  // Clone the live vtable of T into info->cloned_vtable(), returning the address just past the copied slots.
553  static intptr_t* clone_vtable(const char* name, CppVtableInfo* info);
554
555  static void zero_vtable_clone() {
556    assert(DumpSharedSpaces, "dump-time only");
557    _info->zero();
558  }
559
560  // Switch the vtable pointer to point to the cloned vtable.
561  static void patch(Metadata* obj) {
562    assert(DumpSharedSpaces, "dump-time only");
563    *(void**)obj = (void*)(_info->cloned_vtable());
564  }
565
566  static bool is_valid_shared_object(const T* obj) {
567    intptr_t* vptr = *(intptr_t**)obj;
568    return vptr == _info->cloned_vtable();
569  }
570};
571
572template <class T> CppVtableInfo* CppVtableCloner<T>::_info = NULL;
573
574template <class T>
575intptr_t* CppVtableCloner<T>::allocate(const char* name) {
576  assert(is_aligned(_md_region.top(), sizeof(intptr_t)), "bad alignment");
577  int n = get_vtable_length(name);
578  _info = (CppVtableInfo*)_md_region.allocate(CppVtableInfo::byte_size(n), sizeof(intptr_t));
579  _info->set_vtable_size(n);
580
581  intptr_t* p = clone_vtable(name, _info);
582  assert((char*)p == _md_region.top(), "must be");
583
584  return p;
585}
586
587template <class T>
588intptr_t* CppVtableCloner<T>::clone_vtable(const char* name, CppVtableInfo* info) {
589  if (!DumpSharedSpaces) {
590    assert(_info == 0, "_info is initialized only at dump time");
591    _info = info; // Remember it -- it will be used by MetaspaceShared::is_valid_shared_method()
592  }
593  T tmp; // Allocate temporary dummy metadata object to get to the original vtable.
594  int n = info->vtable_size();
595  intptr_t* srcvtable = vtable_of(tmp);
596  intptr_t* dstvtable = info->cloned_vtable();
597
598  // We already checked (and, if necessary, adjusted n) when the vtables were allocated, so we are
599  // safe to do memcpy.
600  log_debug(cds, vtables)("Copying %3d vtable entries for %s", n, name);
601  memcpy(dstvtable, srcvtable, sizeof(intptr_t) * n);
602  return dstvtable + n;
603}
604
605// To determine the size of the vtable for each type, we use the following
606// trick by declaring 2 subclasses:
607//
608//   class CppVtableTesterA: public InstanceKlass {virtual int   last_virtual_method() {return 1;}    };
//   class CppVtableTesterB: public InstanceKlass {virtual void* last_virtual_method() {return NULL;} };
610//
611// CppVtableTesterA and CppVtableTesterB's vtables have the following properties:
612// - Their size (N+1) is exactly one more than the size of InstanceKlass's vtable (N)
// - The first N entries are exactly the same as in InstanceKlass's vtable.
614// - Their last entry is different.
615//
616// So to determine the value of N, we just walk CppVtableTesterA and CppVtableTesterB's tables
617// and find the first entry that's different.
618//
619// This works on all C++ compilers supported by Oracle, but you may need to tweak it for more
620// esoteric compilers.
621
622template <class T> class CppVtableTesterB: public T {
623public:
624  virtual int last_virtual_method() {return 1;}
625};
626
627template <class T> class CppVtableTesterA : public T {
628public:
629  virtual void* last_virtual_method() {
630    // Make this different than CppVtableTesterB::last_virtual_method so the C++
631    // compiler/linker won't alias the two functions.
632    return NULL;
633  }
634};
635
636template <class T>
637int CppVtableCloner<T>::get_vtable_length(const char* name) {
638  CppVtableTesterA<T> a;
639  CppVtableTesterB<T> b;
640
641  intptr_t* avtable = vtable_of(a);
642  intptr_t* bvtable = vtable_of(b);
643
644  // Start at slot 1, because slot 0 may be RTTI (on Solaris/Sparc)
645  int vtable_len = 1;
646  for (; ; vtable_len++) {
647    if (avtable[vtable_len] != bvtable[vtable_len]) {
648      break;
649    }
650  }
651  log_debug(cds, vtables)("Found   %3d vtable entries for %s", vtable_len, name);
652
653  return vtable_len;
654}
655
656#define ALLOC_CPP_VTABLE_CLONE(c) \
657  CppVtableCloner<c>::allocate(#c);
658
659#define CLONE_CPP_VTABLE(c) \
660  p = CppVtableCloner<c>::clone_vtable(#c, (CppVtableInfo*)p);
661
662#define ZERO_CPP_VTABLE(c) \
663 CppVtableCloner<c>::zero_vtable_clone();
664
665// This can be called at both dump time and run time.
666intptr_t* MetaspaceShared::clone_cpp_vtables(intptr_t* p) {
667  assert(DumpSharedSpaces || UseSharedSpaces, "sanity");
668  CPP_VTABLE_PATCH_TYPES_DO(CLONE_CPP_VTABLE);
669  return p;
670}
671
672void MetaspaceShared::zero_cpp_vtable_clones_for_writing() {
673  assert(DumpSharedSpaces, "dump-time only");
674  CPP_VTABLE_PATCH_TYPES_DO(ZERO_CPP_VTABLE);
675}
676
677// Allocate and initialize the C++ vtables, starting from top, but do not go past end.
678void MetaspaceShared::allocate_cpp_vtable_clones() {
679  assert(DumpSharedSpaces, "dump-time only");
680  // Layout (each slot is a intptr_t):
681  //   [number of slots in the first vtable = n1]
682  //   [ <n1> slots for the first vtable]
  //   [number of slots in the second vtable = n2]
684  //   [ <n2> slots for the second vtable]
685  //   ...
  // The order of the vtables is the same as the CPP_VTABLE_PATCH_TYPES_DO macro.
687  CPP_VTABLE_PATCH_TYPES_DO(ALLOC_CPP_VTABLE_CLONE);
688}
689
// Switch the vtable pointer to point to the cloned vtable. We assume the
// vtable pointer is in the first slot of the object.
692void MetaspaceShared::patch_cpp_vtable_pointers() {
693  int n = _global_klass_objects->length();
694  for (int i = 0; i < n; i++) {
695    Klass* obj = _global_klass_objects->at(i);
696    if (obj->is_instance_klass()) {
697      InstanceKlass* ik = InstanceKlass::cast(obj);
698      if (ik->is_class_loader_instance_klass()) {
699        CppVtableCloner<InstanceClassLoaderKlass>::patch(ik);
700      } else if (ik->is_reference_instance_klass()) {
701        CppVtableCloner<InstanceRefKlass>::patch(ik);
702      } else if (ik->is_mirror_instance_klass()) {
703        CppVtableCloner<InstanceMirrorKlass>::patch(ik);
704      } else {
705        CppVtableCloner<InstanceKlass>::patch(ik);
706      }
707      ConstantPool* cp = ik->constants();
708      CppVtableCloner<ConstantPool>::patch(cp);
709      for (int j = 0; j < ik->methods()->length(); j++) {
710        Method* m = ik->methods()->at(j);
711        CppVtableCloner<Method>::patch(m);
712        assert(CppVtableCloner<Method>::is_valid_shared_object(m), "must be");
713      }
714    } else if (obj->is_objArray_klass()) {
715      CppVtableCloner<ObjArrayKlass>::patch(obj);
716    } else {
717      assert(obj->is_typeArray_klass(), "sanity");
718      CppVtableCloner<TypeArrayKlass>::patch(obj);
719    }
720  }
721}
722
723bool MetaspaceShared::is_valid_shared_method(const Method* m) {
724  assert(is_in_shared_space(m), "must be");
725  return CppVtableCloner<Method>::is_valid_shared_object(m);
726}
727
728// Closure for serializing initialization data out to a data area to be
729// written to the shared file.
730
731class WriteClosure : public SerializeClosure {
732private:
733  DumpRegion* _dump_region;
734
735public:
736  WriteClosure(DumpRegion* r) {
737    _dump_region = r;
738  }
739
740  void do_ptr(void** p) {
741    _dump_region->append_intptr_t((intptr_t)*p);
742  }
743
744  void do_u4(u4* p) {
745    void* ptr = (void*)(uintx(*p));
746    do_ptr(&ptr);
747  }
748
749  void do_tag(int tag) {
750    _dump_region->append_intptr_t((intptr_t)tag);
751  }
752
753  void do_region(u_char* start, size_t size) {
754    assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
755    assert(size % sizeof(intptr_t) == 0, "bad size");
756    do_tag((int)size);
757    while (size > 0) {
758      _dump_region->append_intptr_t(*(intptr_t*)start);
759      start += sizeof(intptr_t);
760      size -= sizeof(intptr_t);
761    }
762  }
763
764  bool reading() const { return false; }
765};
766
767// This is for dumping detailed statistics for the allocations
768// in the shared spaces.
769class DumpAllocStats : public ResourceObj {
770public:
771
  // Here's a poor man's enum inheritance
773#define SHAREDSPACE_OBJ_TYPES_DO(f) \
774  METASPACE_OBJ_TYPES_DO(f) \
775  f(SymbolHashentry) \
776  f(SymbolBucket) \
777  f(StringHashentry) \
778  f(StringBucket) \
779  f(Other)
780
781  enum Type {
782    // Types are MetaspaceObj::ClassType, MetaspaceObj::SymbolType, etc
783    SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_DECLARE)
784    _number_of_types
785  };
786
787  static const char * type_name(Type type) {
788    switch(type) {
789    SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_NAME_CASE)
790    default:
791      ShouldNotReachHere();
792      return NULL;
793    }
794  }
795
796public:
797  enum { RO = 0, RW = 1 };
798
799  int _counts[2][_number_of_types];
800  int _bytes [2][_number_of_types];
801
802  DumpAllocStats() {
803    memset(_counts, 0, sizeof(_counts));
804    memset(_bytes,  0, sizeof(_bytes));
805  };
806
807  void record(MetaspaceObj::Type type, int byte_size, bool read_only) {
808    assert(int(type) >= 0 && type < MetaspaceObj::_number_of_types, "sanity");
809    int which = (read_only) ? RO : RW;
810    _counts[which][type] ++;
811    _bytes [which][type] += byte_size;
812  }
813
814  void record_other_type(int byte_size, bool read_only) {
815    int which = (read_only) ? RO : RW;
816    _bytes [which][OtherType] += byte_size;
817  }
818  void print_stats(int ro_all, int rw_all, int mc_all, int md_all);
819};
820
821void DumpAllocStats::print_stats(int ro_all, int rw_all, int mc_all, int md_all) {
822  // Calculate size of data that was not allocated by Metaspace::allocate()
823  MetaspaceSharedStats *stats = MetaspaceShared::stats();
824
825  // symbols
826  _counts[RO][SymbolHashentryType] = stats->symbol.hashentry_count;
827  _bytes [RO][SymbolHashentryType] = stats->symbol.hashentry_bytes;
828
829  _counts[RO][SymbolBucketType] = stats->symbol.bucket_count;
830  _bytes [RO][SymbolBucketType] = stats->symbol.bucket_bytes;
831
832  // strings
833  _counts[RO][StringHashentryType] = stats->string.hashentry_count;
834  _bytes [RO][StringHashentryType] = stats->string.hashentry_bytes;
835
836  _counts[RO][StringBucketType] = stats->string.bucket_count;
837  _bytes [RO][StringBucketType] = stats->string.bucket_bytes;
838
839  // TODO: count things like dictionary, vtable, etc
840  _bytes[RW][OtherType] += mc_all + md_all;
841  rw_all += mc_all + md_all; // mc/md are mapped Read/Write
842
843  // prevent divide-by-zero
844  if (ro_all < 1) {
845    ro_all = 1;
846  }
847  if (rw_all < 1) {
848    rw_all = 1;
849  }
850
851  int all_ro_count = 0;
852  int all_ro_bytes = 0;
853  int all_rw_count = 0;
854  int all_rw_bytes = 0;
855
856// To make fmt_stats be a syntactic constant (for format warnings), use #define.
857#define fmt_stats "%-20s: %8d %10d %5.1f | %8d %10d %5.1f | %8d %10d %5.1f"
858  const char *sep = "--------------------+---------------------------+---------------------------+--------------------------";
859  const char *hdr = "                        ro_cnt   ro_bytes     % |   rw_cnt   rw_bytes     % |  all_cnt  all_bytes     %";
860
861  ResourceMark rm;
862  LogMessage(cds) msg;
863  stringStream info_stream;
864
865  info_stream.print_cr("Detailed metadata info (excluding od/st regions; rw stats include md/mc regions):");
866  info_stream.print_cr("%s", hdr);
867  info_stream.print_cr("%s", sep);
868  for (int type = 0; type < int(_number_of_types); type ++) {
869    const char *name = type_name((Type)type);
870    int ro_count = _counts[RO][type];
871    int ro_bytes = _bytes [RO][type];
872    int rw_count = _counts[RW][type];
873    int rw_bytes = _bytes [RW][type];
874    int count = ro_count + rw_count;
875    int bytes = ro_bytes + rw_bytes;
876
877    double ro_perc = 100.0 * double(ro_bytes) / double(ro_all);
878    double rw_perc = 100.0 * double(rw_bytes) / double(rw_all);
879    double perc    = 100.0 * double(bytes)    / double(ro_all + rw_all);
880
881    info_stream.print_cr(fmt_stats, name,
882                         ro_count, ro_bytes, ro_perc,
883                         rw_count, rw_bytes, rw_perc,
884                         count, bytes, perc);
885
886    all_ro_count += ro_count;
887    all_ro_bytes += ro_bytes;
888    all_rw_count += rw_count;
889    all_rw_bytes += rw_bytes;
890  }
891
892  int all_count = all_ro_count + all_rw_count;
893  int all_bytes = all_ro_bytes + all_rw_bytes;
894
895  double all_ro_perc = 100.0 * double(all_ro_bytes) / double(ro_all);
896  double all_rw_perc = 100.0 * double(all_rw_bytes) / double(rw_all);
897  double all_perc    = 100.0 * double(all_bytes)    / double(ro_all + rw_all);
898
899  info_stream.print_cr("%s", sep);
900  info_stream.print_cr(fmt_stats, "Total",
901                       all_ro_count, all_ro_bytes, all_ro_perc,
902                       all_rw_count, all_rw_bytes, all_rw_perc,
903                       all_count, all_bytes, all_perc);
904
905  assert(all_ro_bytes == ro_all, "everything should have been counted");
906  assert(all_rw_bytes == rw_all, "everything should have been counted");
907
908  msg.info("%s", info_stream.as_string());
909#undef fmt_stats
910}
911
912// Populate the shared space.
913
914class VM_PopulateDumpSharedSpace: public VM_Operation {
915private:
916  GrowableArray<MemRegion> *_string_regions;
917  GrowableArray<MemRegion> *_open_archive_heap_regions;
918
919  void dump_java_heap_objects() NOT_CDS_JAVA_HEAP_RETURN;
920  void dump_symbols();
921  char* dump_read_only_tables();
922  void print_region_stats();
923  void print_heap_region_stats(GrowableArray<MemRegion> *heap_mem,
924                               const char *name, const size_t total_size);
925public:
926
927  VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; }
928  void doit();   // outline because gdb sucks
929  static void write_region(FileMapInfo* mapinfo, int region, DumpRegion* space, bool read_only,  bool allow_exec);
930}; // class VM_PopulateDumpSharedSpace
931
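// Collects all Symbols in the SymbolTable and sorts them by address.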
932class SortedSymbolClosure: public SymbolClosure {
933  GrowableArray<Symbol*> _symbols;
934  virtual void do_symbol(Symbol** sym) {
935    assert((*sym)->is_permanent(), "archived symbols must be permanent");
936    _symbols.append(*sym);
937  }
938  static int compare_symbols_by_address(Symbol** a, Symbol** b) {
939    if (a[0] < b[0]) {
940      return -1;
941    } else if (a[0] == b[0]) {
942      return 0;
943    } else {
944      return 1;
945    }
946  }
947
948public:
949  SortedSymbolClosure() {
950    SymbolTable::symbols_do(this);
951    _symbols.sort(compare_symbols_by_address);
952  }
953  GrowableArray<Symbol*>* get_sorted_symbols() {
954    return &_symbols;
955  }
956};
957
958// ArchiveCompactor --
959//
960// This class is the central piece of shared archive compaction -- all metaspace data are
961// initially allocated outside of the shared regions. ArchiveCompactor copies the
962// metaspace data into their final location in the shared regions.
963
964class ArchiveCompactor : AllStatic {
965  static DumpAllocStats* _alloc_stats;
966  static SortedSymbolClosure* _ssc;
967
968  static unsigned my_hash(const address& a) {
969    return primitive_hash<address>(a);
970  }
971  static bool my_equals(const address& a0, const address& a1) {
972    return primitive_equals<address>(a0, a1);
973  }
974  typedef ResourceHashtable<
975      address, address,
976      ArchiveCompactor::my_hash,   // solaris compiler doesn't like: primitive_hash<address>
977      ArchiveCompactor::my_equals, // solaris compiler doesn't like: primitive_equals<address>
978      16384, ResourceObj::C_HEAP> RelocationTable;
979  static RelocationTable* _new_loc_table;
980
981public:
982  static void initialize() {
983    _alloc_stats = new(ResourceObj::C_HEAP, mtInternal)DumpAllocStats;
984    _new_loc_table = new(ResourceObj::C_HEAP, mtInternal)RelocationTable;
985  }
986  static DumpAllocStats* alloc_stats() {
987    return _alloc_stats;
988  }
989
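  // Make a shallow copy of one MetaspaceObj into the ro or rw dump region and
  // record its new location in the relocation table.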
990  static void allocate(MetaspaceClosure::Ref* ref, bool read_only) {
991    address obj = ref->obj();
992    int bytes = ref->size() * BytesPerWord;
993    char* p;
994    size_t alignment = BytesPerWord;
995    char* oldtop;
996    char* newtop;
997
998    if (read_only) {
999      oldtop = _ro_region.top();
1000      p = _ro_region.allocate(bytes, alignment);
1001      newtop = _ro_region.top();
1002    } else {
1003      oldtop = _rw_region.top();
1004      p = _rw_region.allocate(bytes, alignment);
1005      newtop = _rw_region.top();
1006    }
1007    memcpy(p, obj, bytes);
1008    bool isnew = _new_loc_table->put(obj, (address)p);
1009    log_trace(cds)("Copy: " PTR_FORMAT " ==> " PTR_FORMAT " %d", p2i(obj), p2i(p), bytes);
1010    assert(isnew, "must be");
1011
1012    _alloc_stats->record(ref->msotype(), int(newtop - oldtop), read_only);
1013    if (ref->msotype() == MetaspaceObj::SymbolType) {
1014      uintx delta = MetaspaceShared::object_delta(p);
1015      if (delta > MAX_SHARED_DELTA) {
        // This is just a sanity check and should not appear in any real-world usage. This
        // happens only if you allocate more than 2GB of Symbols, which would require
        // millions of shared classes.
1019        vm_exit_during_initialization("Too many Symbols in the CDS archive",
1020                                      "Please reduce the number of shared classes.");
1021      }
1022    }
1023  }
1024
1025  static address get_new_loc(MetaspaceClosure::Ref* ref) {
1026    address* pp = _new_loc_table->get(ref->obj());
1027    assert(pp != NULL, "must be");
1028    return *pp;
1029  }
1030
1031private:
1032  // Makes a shallow copy of visited MetaspaceObj's
1033  class ShallowCopier: public UniqueMetaspaceClosure {
1034    bool _read_only;
1035  public:
1036    ShallowCopier(bool read_only) : _read_only(read_only) {}
1037
1038    virtual void do_unique_ref(Ref* ref, bool read_only) {
1039      if (read_only == _read_only) {
1040        allocate(ref, read_only);
1041      }
1042    }
1043  };
1044
1045  // Relocate embedded pointers within a MetaspaceObj's shallow copy
1046  class ShallowCopyEmbeddedRefRelocator: public UniqueMetaspaceClosure {
1047  public:
1048    virtual void do_unique_ref(Ref* ref, bool read_only) {
1049      address new_loc = get_new_loc(ref);
1050      RefRelocator refer;
1051      ref->metaspace_pointers_do_at(&refer, new_loc);
1052    }
1053  };
1054
1055  // Relocate a reference to point to its shallow copy
1056  class RefRelocator: public MetaspaceClosure {
1057  public:
1058    virtual bool do_ref(Ref* ref, bool read_only) {
1059      if (ref->not_null()) {
1060        ref->update(get_new_loc(ref));
1061      }
1062      return false; // Do not recurse.
1063    }
1064  };
1065
1066#ifdef ASSERT
1067  class IsRefInArchiveChecker: public MetaspaceClosure {
1068  public:
1069    virtual bool do_ref(Ref* ref, bool read_only) {
1070      if (ref->not_null()) {
1071        char* obj = (char*)ref->obj();
1072        assert(_ro_region.contains(obj) || _rw_region.contains(obj),
1073               "must be relocated to point to CDS archive");
1074      }
1075      return false; // Do not recurse.
1076    }
1077  };
1078#endif
1079
1080public:
1081  static void copy_and_compact() {
1082    // We should no longer allocate anything from the metaspace, so that
1083    // we can have a stable set of MetaspaceObjs to work with.
1084    Metaspace::freeze();
1085
1086    ResourceMark rm;
1087    SortedSymbolClosure the_ssc; // StackObj
1088    _ssc = &the_ssc;
1089
1090    tty->print_cr("Scanning all metaspace objects ... ");
1091    {
1092      // allocate and shallow-copy RW objects, immediately following the MC region
1093      tty->print_cr("Allocating RW objects ... ");
1094      _mc_region.pack(&_rw_region);
1095
1096      ResourceMark rm;
1097      ShallowCopier rw_copier(false);
1098      iterate_roots(&rw_copier);
1099    }
1100    {
      // allocate and shallow-copy RO objects, immediately following the RW region
1102      tty->print_cr("Allocating RO objects ... ");
1103      _rw_region.pack(&_ro_region);
1104
1105      ResourceMark rm;
1106      ShallowCopier ro_copier(true);
1107      iterate_roots(&ro_copier);
1108    }
1109    {
1110      tty->print_cr("Relocating embedded pointers ... ");
1111      ResourceMark rm;
1112      ShallowCopyEmbeddedRefRelocator emb_reloc;
1113      iterate_roots(&emb_reloc);
1114    }
1115    {
1116      tty->print_cr("Relocating external roots ... ");
1117      ResourceMark rm;
1118      RefRelocator ext_reloc;
1119      iterate_roots(&ext_reloc);
1120    }
1121
1122#ifdef ASSERT
1123    {
1124      tty->print_cr("Verifying external roots ... ");
1125      ResourceMark rm;
1126      IsRefInArchiveChecker checker;
1127      iterate_roots(&checker);
1128    }
1129#endif
1130
1131
1132    // cleanup
1133    _ssc = NULL;
1134  }
1135
  // We must relocate SystemDictionary::_well_known_klasses only after we have copied the
  // Java objects during dump_java_heap_objects(): during the object copy, we operate on the
  // old objects, which assert that their klass is the original klass.
1139  static void relocate_well_known_klasses() {
1140    {
1141      tty->print_cr("Relocating SystemDictionary::_well_known_klasses[] ... ");
1142      ResourceMark rm;
1143      RefRelocator ext_reloc;
1144      SystemDictionary::well_known_klasses_do(&ext_reloc);
1145    }
1146    // NOTE: after this point, we shouldn't have any globals that can reach the old
1147    // objects.
1148
1149    // We cannot use any of the objects in the heap anymore (except for the objects
1150    // in the CDS shared string regions) because their headers no longer point to
1151    // valid Klasses.
1152  }
1153
1154  static void iterate_roots(MetaspaceClosure* it) {
1155    GrowableArray<Symbol*>* symbols = _ssc->get_sorted_symbols();
1156    for (int i=0; i<symbols->length(); i++) {
1157      it->push(symbols->adr_at(i));
1158    }
1159    if (_global_klass_objects != NULL) {
1160      // Need to fix up the pointers
1161      for (int i = 0; i < _global_klass_objects->length(); i++) {
1162        // NOTE -- this requires that the vtable is NOT yet patched, or else we are hosed.
1163        it->push(_global_klass_objects->adr_at(i));
1164      }
1165    }
1166    FileMapInfo::metaspace_pointers_do(it);
1167    SystemDictionary::classes_do(it);
1168    Universe::metaspace_pointers_do(it);
1169    SymbolTable::metaspace_pointers_do(it);
1170    vmSymbols::metaspace_pointers_do(it);
1171  }
1172
1173  static Klass* get_relocated_klass(Klass* orig_klass) {
1174    address* pp = _new_loc_table->get((address)orig_klass);
1175    assert(pp != NULL, "must be");
1176    Klass* klass = (Klass*)(*pp);
1177    assert(klass->is_klass(), "must be");
1178    return klass;
1179  }
1180};
1181
1182DumpAllocStats* ArchiveCompactor::_alloc_stats;
1183SortedSymbolClosure* ArchiveCompactor::_ssc;
1184ArchiveCompactor::RelocationTable* ArchiveCompactor::_new_loc_table;
1185
1186void VM_PopulateDumpSharedSpace::write_region(FileMapInfo* mapinfo, int region_idx,
1187                                              DumpRegion* dump_region, bool read_only,  bool allow_exec) {
1188  mapinfo->write_region(region_idx, dump_region->base(), dump_region->used(), read_only, allow_exec);
1189}
1190
1191void VM_PopulateDumpSharedSpace::dump_symbols() {
1192  tty->print_cr("Dumping symbol table ...");
1193
1194  NOT_PRODUCT(SymbolTable::verify());
1195  SymbolTable::write_to_archive();
1196}
1197
1198char* VM_PopulateDumpSharedSpace::dump_read_only_tables() {
1199  char* oldtop = _ro_region.top();
1200  // Reorder the system dictionary. Moving the symbols affects
1201  // how the hash table indices are calculated.
1202  SystemDictionary::reorder_dictionary_for_sharing();
1203  tty->print("Removing java_mirror ... ");
1204  remove_java_mirror_in_classes();
1205  tty->print_cr("done. ");
1206  NOT_PRODUCT(SystemDictionary::verify();)
1207
1208  size_t buckets_bytes = SystemDictionary::count_bytes_for_buckets();
1209  char* buckets_top = _ro_region.allocate(buckets_bytes, sizeof(intptr_t));
1210  SystemDictionary::copy_buckets(buckets_top, _ro_region.top());
1211
1212  size_t table_bytes = SystemDictionary::count_bytes_for_table();
1213  char* table_top = _ro_region.allocate(table_bytes, sizeof(intptr_t));
1214  SystemDictionary::copy_table(table_top, _ro_region.top());
1215
1216  // Write the other data to the output array.
1217  WriteClosure wc(&_ro_region);
1218  MetaspaceShared::serialize(&wc);
1219
1220  char* newtop = _ro_region.top();
1221  ArchiveCompactor::alloc_stats()->record_other_type(int(newtop - oldtop), true);
1222  return buckets_top;
1223}
1224
1225void VM_PopulateDumpSharedSpace::doit() {
1226  Thread* THREAD = VMThread::vm_thread();
1227
1228  NOT_PRODUCT(SystemDictionary::verify();)
1229  // The following guarantee is meant to ensure that no loader constraints
1230  // exist yet, since the constraints table is not shared.  This becomes
1231  // more important now that we don't re-initialize vtables/itables for
1232  // shared classes at runtime, where constraints were previously created.
1233  guarantee(SystemDictionary::constraints()->number_of_entries() == 0,
1234            "loader constraints are not saved");
1235  guarantee(SystemDictionary::placeholders()->number_of_entries() == 0,
1236          "placeholders are not saved");
1237  // Revisit and implement this if we prelink method handle call sites:
1238  guarantee(SystemDictionary::invoke_method_table() == NULL ||
1239            SystemDictionary::invoke_method_table()->number_of_entries() == 0,
1240            "invoke method table is not saved");
1241
1242  // At this point, many classes have been loaded.
  // Gather SystemDictionary classes into a global array and operate on that,
  // so we don't have to walk the SystemDictionary again.
1245  _global_klass_objects = new GrowableArray<Klass*>(1000);
1246  CollectClassesClosure collect_classes;
1247  ClassLoaderDataGraph::loaded_classes_do(&collect_classes);
1248
1249  tty->print_cr("Number of classes %d", _global_klass_objects->length());
1250  {
1251    int num_type_array = 0, num_obj_array = 0, num_inst = 0;
1252    for (int i = 0; i < _global_klass_objects->length(); i++) {
1253      Klass* k = _global_klass_objects->at(i);
1254      if (k->is_instance_klass()) {
1255        num_inst ++;
1256      } else if (k->is_objArray_klass()) {
1257        num_obj_array ++;
1258      } else {
1259        assert(k->is_typeArray_klass(), "sanity");
1260        num_type_array ++;
1261      }
1262    }
1263    tty->print_cr("    instance classes   = %5d", num_inst);
1264    tty->print_cr("    obj array classes  = %5d", num_obj_array);
1265    tty->print_cr("    type array classes = %5d", num_type_array);
1266  }
1267
1268  // Ensure the ConstMethods won't be modified at run-time
1269  tty->print("Updating ConstMethods ... ");
1270  rewrite_nofast_bytecodes_and_calculate_fingerprints();
1271  tty->print_cr("done. ");
1272
1273  // Move classes from platform/system dictionaries into the boot dictionary
1274  SystemDictionary::combine_shared_dictionaries();
1275
1276  // Remove all references outside the metadata
1277  tty->print("Removing unshareable information ... ");
1278  remove_unshareable_in_classes();
1279  tty->print_cr("done. ");
1280
  // We don't support archiving anonymous classes. Verify that they are not stored in
  // any of the dictionaries.
  NOT_PRODUCT(assert_no_anonymous_classes_in_dictionaries());
1284
1285  SystemDictionaryShared::finalize_verification_constraints();
1286
1287  ArchiveCompactor::initialize();
1288  ArchiveCompactor::copy_and_compact();
1289
1290  dump_symbols();
1291
1292  // Dump supported java heap objects
1293  _string_regions = NULL;
1294  _open_archive_heap_regions = NULL;
1295  dump_java_heap_objects();
1296
1297  ArchiveCompactor::relocate_well_known_klasses();
1298
1299  char* read_only_tables_start = dump_read_only_tables();
1300  _ro_region.pack(&_md_region);
1301
1302  char* vtbl_list = _md_region.top();
1303  MetaspaceShared::allocate_cpp_vtable_clones();
1304  _md_region.pack(&_od_region);
1305
1306  // Relocate the archived class file data into the od region
1307  relocate_cached_class_file();
1308  _od_region.pack();
1309
  // The 5 core spaces are allocated consecutively mc->rw->ro->md->od, so their total size
  // is just the space between the two ends.
1312  size_t core_spaces_size = _od_region.end() - _mc_region.base();
1313  assert(core_spaces_size == (size_t)align_up(core_spaces_size, Metaspace::reserve_alignment()),
1314         "should already be aligned");
1315
1316  // During patching, some virtual methods may be called, so at this point
1317  // the vtables must contain valid methods (as filled in by CppVtableCloner::allocate).
1318  MetaspaceShared::patch_cpp_vtable_pointers();
1319
1320  // The vtable clones contain addresses of the current process.
1321  // We don't want to write these addresses into the archive.
1322  MetaspaceShared::zero_cpp_vtable_clones_for_writing();
1323
1324  // Create and write the archive file that maps the shared spaces.
1325
1326  FileMapInfo* mapinfo = new FileMapInfo();
1327  mapinfo->populate_header(os::vm_allocation_granularity());
1328  mapinfo->set_read_only_tables_start(read_only_tables_start);
1329  mapinfo->set_misc_data_patching_start(vtbl_list);
1330  mapinfo->set_cds_i2i_entry_code_buffers(MetaspaceShared::cds_i2i_entry_code_buffers());
1331  mapinfo->set_cds_i2i_entry_code_buffers_size(MetaspaceShared::cds_i2i_entry_code_buffers_size());
1332  mapinfo->set_core_spaces_size(core_spaces_size);
1333
1334  for (int pass=1; pass<=2; pass++) {
1335    if (pass == 1) {
      // The first pass doesn't actually write any data to disk. All it
      // does is update the fields in mapinfo->_header.
1338    } else {
      // After the first pass, the contents of mapinfo->_header are finalized,
      // so we can compute the header's CRC and write the header
      // and the regions to disk.
1342      mapinfo->open_for_write();
1343      mapinfo->set_header_crc(mapinfo->compute_header_crc());
1344    }
1345    mapinfo->write_header();
1346
    // NOTE: mc contains the trampoline code for method entries, which is patched at run time,
    // so it needs to be mapped read/write (and executable).
1349    write_region(mapinfo, MetaspaceShared::mc, &_mc_region, /*read_only=*/false,/*allow_exec=*/true);
1350    write_region(mapinfo, MetaspaceShared::rw, &_rw_region, /*read_only=*/false,/*allow_exec=*/false);
1351    write_region(mapinfo, MetaspaceShared::ro, &_ro_region, /*read_only=*/true, /*allow_exec=*/false);
1352    write_region(mapinfo, MetaspaceShared::md, &_md_region, /*read_only=*/false,/*allow_exec=*/false);
1353    write_region(mapinfo, MetaspaceShared::od, &_od_region, /*read_only=*/true, /*allow_exec=*/false);
1354
1355    _total_string_region_size = mapinfo->write_archive_heap_regions(
1356                                        _string_regions,
1357                                        MetaspaceShared::first_string,
1358                                        MetaspaceShared::max_strings);
1359    _total_open_archive_region_size = mapinfo->write_archive_heap_regions(
1360                                        _open_archive_heap_regions,
1361                                        MetaspaceShared::first_open_archive_heap_region,
1362                                        MetaspaceShared::max_open_archive_heap_region);
1363  }
1364
1365  mapinfo->close();
1366
1367  // Restore the vtable in case we invoke any virtual methods.
1368  MetaspaceShared::clone_cpp_vtables((intptr_t*)vtbl_list);
1369
1370  print_region_stats();
1371
1372  if (log_is_enabled(Info, cds)) {
1373    ArchiveCompactor::alloc_stats()->print_stats(int(_ro_region.used()), int(_rw_region.used()),
1374                                                 int(_mc_region.used()), int(_md_region.used()));
1375  }
1376
1377  if (PrintSystemDictionaryAtExit) {
1378    SystemDictionary::print();
1379  }
  // There may be other pending VM operations that operate on the InstanceKlasses,
  // which will fail because InstanceKlass::remove_unshareable_info()
  // has been called. Forget these operations and exit the VM directly.
1383  vm_direct_exit(0);
1384}
1385
1386void VM_PopulateDumpSharedSpace::print_region_stats() {
1387  // Print statistics of all the regions
1388  const size_t total_reserved = _ro_region.reserved()  + _rw_region.reserved() +
1389                                _mc_region.reserved()  + _md_region.reserved() +
1390                                _od_region.reserved()  +
1391                                _total_string_region_size +
1392                                _total_open_archive_region_size;
1393  const size_t total_bytes = _ro_region.used()  + _rw_region.used() +
1394                             _mc_region.used()  + _md_region.used() +
1395                             _od_region.used()  +
1396                             _total_string_region_size +
1397                             _total_open_archive_region_size;
1398  const double total_u_perc = total_bytes / double(total_reserved) * 100.0;
1399
1400  _mc_region.print(total_reserved);
1401  _rw_region.print(total_reserved);
1402  _ro_region.print(total_reserved);
1403  _md_region.print(total_reserved);
1404  _od_region.print(total_reserved);
1405  print_heap_region_stats(_string_regions, "st", total_reserved);
1406  print_heap_region_stats(_open_archive_heap_regions, "oa", total_reserved);
1407
1408  tty->print_cr("total   : " SIZE_FORMAT_W(9) " [100.0%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used]",
1409                 total_bytes, total_reserved, total_u_perc);
1410}
1411
1412void VM_PopulateDumpSharedSpace::print_heap_region_stats(GrowableArray<MemRegion> *heap_mem,
1413                                                         const char *name, const size_t total_size) {
1414  int arr_len = heap_mem == NULL ? 0 : heap_mem->length();
1415  for (int i = 0; i < arr_len; i++) {
1416      char* start = (char*)heap_mem->at(i).start();
1417      size_t size = heap_mem->at(i).byte_size();
1418      char* top = start + size;
1419      tty->print_cr("%s%d space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100%% used] at " INTPTR_FORMAT,
1420                    name, i, size, size/double(total_size)*100.0, size, p2i(start));
1421
1422  }
1423}
1424
// Update a Java object to point its Klass* to the new location after
// the shared archive has been compacted.
void MetaspaceShared::relocate_klass_ptr(oop o) {
  assert(DumpSharedSpaces, "sanity");
  Klass* k = ArchiveCompactor::get_relocated_klass(o->klass());
  o->set_klass(k);
}

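// Iterates over all loaded classes and links each InstanceKlass, causing its
// bytecodes to be rewritten and its cpCache to be created. _made_progress
// records whether any class changed state, so the caller can repeat the walk
// until a fixed point is reached (linking may load additional classes).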
class LinkSharedClassesClosure : public KlassClosure {
  Thread* THREAD;
  bool    _made_progress;
 public:
  LinkSharedClassesClosure(Thread* thread) : THREAD(thread), _made_progress(false) {}

  void reset()               { _made_progress = false; }
  bool made_progress() const { return _made_progress; }

  void do_klass(Klass* k) {
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      // Link the class to cause the bytecodes to be rewritten and the
      // cpcache to be created. Class verification is done according
      // to the -Xverify setting.
      _made_progress |= MetaspaceShared::try_link_class(ik, THREAD);
      guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");

      ik->constants()->resolve_class_constants(THREAD);
    }
  }
};

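// Propagates the sharing error state: a class is marked unshareable if its
// super class or one of its interfaces failed verification. The walk is
// repeated by the caller until no more classes change state.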
class CheckSharedClassesClosure : public KlassClosure {
  bool    _made_progress;
 public:
  CheckSharedClassesClosure() : _made_progress(false) {}

  void reset()               { _made_progress = false; }
  bool made_progress() const { return _made_progress; }
  void do_klass(Klass* k) {
    if (k->is_instance_klass() && InstanceKlass::cast(k)->check_sharing_error_state()) {
      _made_progress = true;
    }
  }
};

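// Sanity check used during dumping: every archived InstanceKlass must have
// had its class loader type recorded by the time it is written to the archive.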
void MetaspaceShared::check_shared_class_loader_type(Klass* k) {
  if (k->is_instance_klass()) {
    InstanceKlass* ik = InstanceKlass::cast(k);
    u2 loader_type = ik->loader_type();
    ResourceMark rm;
    guarantee(loader_type != 0,
              "Class loader type is not set for this class %s", ik->name()->as_C_string());
  }
}

void MetaspaceShared::link_and_cleanup_shared_classes(TRAPS) {
  // We need to iterate because verification may cause additional classes
  // to be loaded.
  LinkSharedClassesClosure link_closure(THREAD);
  do {
    link_closure.reset();
    ClassLoaderDataGraph::loaded_classes_do(&link_closure);
    guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");
  } while (link_closure.made_progress());

  if (_has_error_classes) {
    // Mark all classes whose super class or interfaces failed verification.
    CheckSharedClassesClosure check_closure;
    do {
      // Not completely sure if we need to do this iteratively. Anyway,
      // we should come here only if there are unverifiable classes, which
      // shouldn't happen in normal cases. So better safe than sorry.
      check_closure.reset();
      ClassLoaderDataGraph::loaded_classes_do(&check_closure);
    } while (check_closure.made_progress());

    if (IgnoreUnverifiableClassesDuringDump) {
      // This is useful when running JCK or SQE tests. You should not
      // enable this when running real apps.
      SystemDictionary::remove_classes_in_error_state();
    } else {
      tty->print_cr("Please remove the unverifiable classes from your class list and try again");
      exit(1);
    }
  }
}

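// Dump-time preparation, typically reached when the VM is started with
// -Xshare:dump (optionally with -XX:SharedClassListFile=<file>): reject
// command-line settings that are unsupported during dumping, set up the
// shared class path, and allocate the classpath entry table that is recorded
// in the archive and validated again when the archive is mapped at run time.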
void MetaspaceShared::prepare_for_dumping() {
  Arguments::check_unsupported_dumping_properties();
  ClassLoader::initialize_shared_path();
  FileMapInfo::allocate_classpath_entry_table();
}

// Preload classes from a list, populate the shared spaces and dump to a
// file.
void MetaspaceShared::preload_and_dump(TRAPS) {
  { TraceTime timer("Dump Shared Spaces", TRACETIME_LOG(Info, startuptime));
    ResourceMark rm;
    char class_list_path_str[JVM_MAXPATHLEN];
    // Preload classes to be shared.
    // Should use some os:: method rather than fopen() here. aB.
    const char* class_list_path;
    if (SharedClassListFile == NULL) {
      // Construct the path to the class list (in jre/lib)
      // Walk up two directories from the location of the VM and
      // optionally tack on "lib" (depending on platform)
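      // For example, if os::jvm_path() returns something like
      // <java_home>/lib/server/libjvm.so, stripping the last three path
      // components leaves <java_home>; "lib" is then appended (the path no
      // longer ends with it), giving a default list of
      // <java_home>/lib/classlist.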
      os::jvm_path(class_list_path_str, sizeof(class_list_path_str));
      for (int i = 0; i < 3; i++) {
        char *end = strrchr(class_list_path_str, *os::file_separator());
        if (end != NULL) *end = '\0';
      }
      int class_list_path_len = (int)strlen(class_list_path_str);
      if (class_list_path_len >= 3) {
        if (strcmp(class_list_path_str + class_list_path_len - 3, "lib") != 0) {
          if (class_list_path_len < JVM_MAXPATHLEN - 4) {
            jio_snprintf(class_list_path_str + class_list_path_len,
                         sizeof(class_list_path_str) - class_list_path_len,
                         "%slib", os::file_separator());
            class_list_path_len += 4;
          }
        }
      }
      if (class_list_path_len < JVM_MAXPATHLEN - 10) {
        jio_snprintf(class_list_path_str + class_list_path_len,
                     sizeof(class_list_path_str) - class_list_path_len,
                     "%sclasslist", os::file_separator());
      }
      class_list_path = class_list_path_str;
    } else {
      class_list_path = SharedClassListFile;
    }

    tty->print_cr("Loading classes to share ...");
    _has_error_classes = false;
    int class_count = preload_classes(class_list_path, THREAD);
    if (ExtraSharedClassListFile) {
      class_count += preload_classes(ExtraSharedClassListFile, THREAD);
    }
    tty->print_cr("Loading classes to share: done.");

    log_info(cds)("Shared spaces: preloaded %d classes", class_count);

    // Rewrite and link classes
    tty->print_cr("Rewriting and linking classes ...");

    // Link any classes which got missed. This would happen if we have loaded classes that
    // were not explicitly specified in the classlist. E.g., if an interface implemented by class K
    // fails verification, all other interfaces that were not specified in the classlist but
    // are implemented by K are not verified.
    link_and_cleanup_shared_classes(CATCH);
    tty->print_cr("Rewriting and linking classes: done");

    SystemDictionary::clear_invoke_method_table();

    VM_PopulateDumpSharedSpace op;
    VMThread::execute(&op);
  }
}


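// Parse the class list and load each named class, linking it immediately so
// that the klass and its cpCache are allocated close together. Returns the
// number of classes successfully loaded; a class that cannot be found only
// produces a warning.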
int MetaspaceShared::preload_classes(const char* class_list_path, TRAPS) {
  ClassListParser parser(class_list_path);
  int class_count = 0;

  while (parser.parse_one_line()) {
    Klass* klass = ClassLoaderExt::load_one_class(&parser, THREAD);
    if (HAS_PENDING_EXCEPTION) {
      if (klass == NULL &&
          (PENDING_EXCEPTION->klass()->name() == vmSymbols::java_lang_ClassNotFoundException())) {
        // print a warning only when the pending exception is class not found
        tty->print_cr("Preload Warning: Cannot find %s", parser.current_class_name());
      }
      CLEAR_PENDING_EXCEPTION;
    }
    if (klass != NULL) {
      if (log_is_enabled(Trace, cds)) {
        ResourceMark rm;
        log_trace(cds)("Shared spaces preloaded: %s", klass->external_name());
      }

      InstanceKlass* ik = InstanceKlass::cast(klass);

      // Link the class to cause the bytecodes to be rewritten and the
      // cpcache to be created. The linking is done as soon as classes
      // are loaded in order that the related data structures (klass and
      // cpCache) are located together.
      try_link_class(ik, THREAD);
      guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");

      class_count++;
    }
  }

  return class_count;
}

// Returns true if the class's status has changed
bool MetaspaceShared::try_link_class(InstanceKlass* ik, TRAPS) {
  assert(DumpSharedSpaces, "should only be called during dumping");
  if (ik->init_state() < InstanceKlass::linked) {
    bool saved = BytecodeVerificationLocal;
    if (!(ik->is_shared_boot_class())) {
      // The verification decision is based on BytecodeVerificationRemote
      // for non-system classes. Since we are using the NULL classloader
      // to load non-system classes during dumping, we need to temporarily
      // change BytecodeVerificationLocal to be the same as
      // BytecodeVerificationRemote. Note this can cause the parent system
      // classes to also be verified. The extra overhead is acceptable during
      // dumping.
      BytecodeVerificationLocal = BytecodeVerificationRemote;
    }
    ik->link_class(THREAD);
    if (HAS_PENDING_EXCEPTION) {
      ResourceMark rm;
      tty->print_cr("Preload Warning: Verification failed for %s",
                    ik->external_name());
      CLEAR_PENDING_EXCEPTION;
      ik->set_in_error_state();
      _has_error_classes = true;
    }
    BytecodeVerificationLocal = saved;
    return true;
  } else {
    return false;
  }
}

#if INCLUDE_CDS_JAVA_HEAP
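// Copy archivable Java heap objects into the GC's archive regions: interned
// String objects go into the closed ("string") regions, and objects reachable
// from resolved constant pool references go into the open archive regions.
// The resulting MemRegions are later written to the archive file.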
void VM_PopulateDumpSharedSpace::dump_java_heap_objects() {
  if (!MetaspaceShared::is_heap_object_archiving_allowed()) {
    if (log_is_enabled(Info, cds)) {
      log_info(cds)(
        "Archived java heap is not supported as UseG1GC, "
        "UseCompressedOops and UseCompressedClassPointers are required. "
        "Current settings: UseG1GC=%s, UseCompressedOops=%s, UseCompressedClassPointers=%s.",
        BOOL_TO_STR(UseG1GC), BOOL_TO_STR(UseCompressedOops),
        BOOL_TO_STR(UseCompressedClassPointers));
    }
    return;
  }

  {
    NoSafepointVerifier nsv;

    // Cache for recording where the archived objects are copied to
    MetaspaceShared::create_archive_object_cache();

    tty->print_cr("Dumping String objects to closed archive heap region ...");
    NOT_PRODUCT(StringTable::verify());
    // The string space has at most two regions. See FileMapInfo::write_archive_heap_regions() for details.
    _string_regions = new GrowableArray<MemRegion>(2);
    StringTable::write_to_archive(_string_regions);

    tty->print_cr("Dumping objects to open archive heap region ...");
    _open_archive_heap_regions = new GrowableArray<MemRegion>(2);
    MetaspaceShared::dump_open_archive_heap_objects(_open_archive_heap_regions);

    MetaspaceShared::destroy_archive_object_cache();
  }

  G1HeapVerifier::verify_archive_regions();
}

void MetaspaceShared::dump_open_archive_heap_objects(
                                    GrowableArray<MemRegion> * open_archive) {
  assert(UseG1GC, "Only support G1 GC");
  assert(UseCompressedOops && UseCompressedClassPointers,
         "Only support UseCompressedOops and UseCompressedClassPointers enabled");

  Thread* THREAD = Thread::current();
  G1CollectedHeap::heap()->begin_archive_alloc_range(true /* open */);

  MetaspaceShared::archive_resolved_constants(THREAD);

  G1CollectedHeap::heap()->end_archive_alloc_range(open_archive,
                                                   os::vm_allocation_granularity());
}

MetaspaceShared::ArchivedObjectCache* MetaspaceShared::_archive_object_cache = NULL;
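// Copy a single object into the archive heap, unless it has already been
// archived or is too large for an archive region (in which case NULL is
// returned). The identity hash is computed first so that a hash value is
// installed in the object's header before the bits are copied; the copy's
// Klass* is then relocated and the original->copy mapping is recorded in the
// archive object cache so later requests return the same copy.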
oop MetaspaceShared::archive_heap_object(oop obj, Thread* THREAD) {
  assert(DumpSharedSpaces, "dump-time only");

  ArchivedObjectCache* cache = MetaspaceShared::archive_object_cache();
  oop* p = cache->get(obj);
  if (p != NULL) {
    // already archived
    return *p;
  }

  int len = obj->size();
  if (G1CollectedHeap::heap()->is_archive_alloc_too_large(len)) {
    return NULL;
  }

  int hash = obj->identity_hash();
  oop archived_oop = (oop)G1CollectedHeap::heap()->archive_mem_allocate(len);
  if (archived_oop != NULL) {
    Copy::aligned_disjoint_words((HeapWord*)obj, (HeapWord*)archived_oop, len);
    relocate_klass_ptr(archived_oop);
    cache->put(obj, archived_oop);
  }
  return archived_oop;
}

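// Archive the resolved_references array of every archived InstanceKlass's
// constant pool, so that entries resolved at dump time are available from the
// archived heap at run time.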
void MetaspaceShared::archive_resolved_constants(Thread* THREAD) {
  for (int i = 0; i < _global_klass_objects->length(); i++) {
    Klass* k = _global_klass_objects->at(i);
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      ik->constants()->archive_resolved_references(THREAD);
    }
  }
}

void MetaspaceShared::fixup_mapped_heap_regions() {
  FileMapInfo *mapinfo = FileMapInfo::current_info();
  mapinfo->fixup_mapped_heap_regions();
}
#endif // INCLUDE_CDS_JAVA_HEAP

// Closure for deserializing initialization data from a data area (ptr_array)
// read from the shared file.
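//
// Each do_* call consumes the next word from *_ptr_array. do_tag() checks a
// tag word that was recorded at dump time against the value the reader
// expects, so any mismatch between the dump-time and run-time layout of the
// serialized data is caught immediately instead of causing silent corruption.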

class ReadClosure : public SerializeClosure {
private:
  intptr_t** _ptr_array;

  inline intptr_t nextPtr() {
    return *(*_ptr_array)++;
  }

public:
  ReadClosure(intptr_t** ptr_array) { _ptr_array = ptr_array; }

  void do_ptr(void** p) {
    assert(*p == NULL, "initializing previously initialized pointer.");
    intptr_t obj = nextPtr();
    assert((intptr_t)obj >= 0 || (intptr_t)obj < -100,
           "hit tag while initializing ptrs.");
    *p = (void*)obj;
  }

  void do_u4(u4* p) {
    intptr_t obj = nextPtr();
    *p = (u4)(uintx(obj));
  }

  void do_tag(int tag) {
    int old_tag = (int)(intptr_t)nextPtr();
    assert(tag == old_tag, "old tag doesn't match");
    FileMapInfo::assert_mark(tag == old_tag);
  }

  void do_region(u_char* start, size_t size) {
    assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
    assert(size % sizeof(intptr_t) == 0, "bad size");
    do_tag((int)size);
    while (size > 0) {
      *(intptr_t*)start = nextPtr();
      start += sizeof(intptr_t);
      size -= sizeof(intptr_t);
    }
  }

  bool reading() const { return true; }
};

// Return true if given address is in the mapped shared space.
bool MetaspaceShared::is_in_shared_space(const void* p) {
  return UseSharedSpaces && FileMapInfo::current_info()->is_in_shared_space(p);
}

// Return true if given address is in the shared region corresponding to the given index.
bool MetaspaceShared::is_in_shared_region(const void* p, int idx) {
  return UseSharedSpaces && FileMapInfo::current_info()->is_in_shared_region(p, idx);
}

bool MetaspaceShared::is_in_trampoline_frame(address addr) {
  if (UseSharedSpaces && is_in_shared_region(addr, MetaspaceShared::mc)) {
    return true;
  }
  return false;
}

void MetaspaceShared::print_shared_spaces() {
  if (UseSharedSpaces) {
    FileMapInfo::current_info()->print_shared_spaces();
  }
}


// Map shared spaces at requested addresses and return whether it succeeded.
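// The regions are mapped in the order mc, rw, ro, md, od, and each region's
// checksum is verified as it is mapped. If any mapping or check fails, all
// regions mapped so far are unmapped again; with -Xshare:on this is a fatal
// error, otherwise UseSharedSpaces is turned off and the VM continues without
// the archive.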
bool MetaspaceShared::map_shared_spaces(FileMapInfo* mapinfo) {
  size_t image_alignment = mapinfo->alignment();

#ifndef _WINDOWS
  // Map in the shared memory and then map the regions on top of it.
  // On Windows, don't map the memory here because it will cause the
  // mappings of the regions to fail.
  ReservedSpace shared_rs = mapinfo->reserve_shared_memory();
  if (!shared_rs.is_reserved()) return false;
#endif

  assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces");

  char* _ro_base = NULL;
  char* _rw_base = NULL;
  char* _mc_base = NULL;
  char* _md_base = NULL;
  char* _od_base = NULL;

  // Map each shared region
  if ((_mc_base = mapinfo->map_region(mc)) != NULL &&
      mapinfo->verify_region_checksum(mc) &&
      (_rw_base = mapinfo->map_region(rw)) != NULL &&
      mapinfo->verify_region_checksum(rw) &&
      (_ro_base = mapinfo->map_region(ro)) != NULL &&
      mapinfo->verify_region_checksum(ro) &&
      (_md_base = mapinfo->map_region(md)) != NULL &&
      mapinfo->verify_region_checksum(md) &&
      (_od_base = mapinfo->map_region(od)) != NULL &&
      mapinfo->verify_region_checksum(od) &&
      (image_alignment == (size_t)os::vm_allocation_granularity()) &&
      mapinfo->validate_classpath_entry_table()) {
    // Success (no need to do anything)
    return true;
  } else {
    // If there was a failure in mapping any of the spaces, unmap the ones
    // that succeeded
    if (_ro_base != NULL) mapinfo->unmap_region(ro);
    if (_rw_base != NULL) mapinfo->unmap_region(rw);
    if (_mc_base != NULL) mapinfo->unmap_region(mc);
    if (_md_base != NULL) mapinfo->unmap_region(md);
    if (_od_base != NULL) mapinfo->unmap_region(od);
#ifndef _WINDOWS
    // Release the entire mapped region
    shared_rs.release();
#endif
    // If -Xshare:on is specified, print out the error message and exit VM,
    // otherwise, set UseSharedSpaces to false and continue.
    if (RequireSharedSpaces || PrintSharedArchiveAndExit) {
      vm_exit_during_initialization("Unable to use shared archive.", "Failed map_region for using -Xshare:on.");
    } else {
      FLAG_SET_DEFAULT(UseSharedSpaces, false);
    }
    return false;
  }
}

// Read the miscellaneous data from the shared file, and
// deserialize it into its various destinations.

void MetaspaceShared::initialize_shared_spaces() {
  FileMapInfo *mapinfo = FileMapInfo::current_info();
  _cds_i2i_entry_code_buffers = mapinfo->cds_i2i_entry_code_buffers();
  _cds_i2i_entry_code_buffers_size = mapinfo->cds_i2i_entry_code_buffers_size();
  _core_spaces_size = mapinfo->core_spaces_size();
  char* buffer = mapinfo->misc_data_patching_start();
  clone_cpp_vtables((intptr_t*)buffer);

  // The rest of the data is now stored in the RO region
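  //
  // Layout of this area, as consumed by the reads below (a sketch derived
  // from this code, not a formal specification):
  //   [intptr_t] sharedDictionaryLen  -- size in bytes of the bucket array
  //   [intptr_t] number_of_entries    -- number of shared dictionary entries
  //   [bytes   ] HashtableBucket array for the shared dictionary
  //   [intptr_t] len                  -- size in bytes of the entry block
  //   [bytes   ] HashtableEntry objects (the bucket chains)
  //   [words   ] serialized tables and tags, consumed by ReadClosure below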
  buffer = mapinfo->read_only_tables_start();
  int sharedDictionaryLen = *(intptr_t*)buffer;
  buffer += sizeof(intptr_t);
  int number_of_entries = *(intptr_t*)buffer;
  buffer += sizeof(intptr_t);
  SystemDictionary::set_shared_dictionary((HashtableBucket<mtClass>*)buffer,
                                          sharedDictionaryLen,
                                          number_of_entries);
  buffer += sharedDictionaryLen;

  // The following data are the linked list elements
  // (HashtableEntry objects) for the shared dictionary table.

  int len = *(intptr_t*)buffer;     // skip over shared dictionary entries
  buffer += sizeof(intptr_t);
  buffer += len;

  // Verify various attributes of the archive, plus initialize the
  // shared string/symbol tables
  intptr_t* array = (intptr_t*)buffer;
  ReadClosure rc(&array);
  serialize(&rc);

  // Initialize the run-time symbol table.
  SymbolTable::create_table();

  // Close the mapinfo file
  mapinfo->close();

  if (PrintSharedArchiveAndExit) {
    if (PrintSharedDictionary) {
      tty->print_cr("\nShared classes:\n");
      SystemDictionary::print_shared(tty);
    }
    if (_archive_loading_failed) {
      tty->print_cr("archive is invalid");
      vm_exit(1);
    } else {
      tty->print_cr("archive is valid");
      vm_exit(0);
    }
  }
}

// JVM/TI RedefineClasses() support:
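// RedefineClasses may need to modify metadata that lives in the mapped
// read-only (ro) region, so the region is remapped as read-write (private)
// when redefinition requires it.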
bool MetaspaceShared::remap_shared_readonly_as_readwrite() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  if (UseSharedSpaces) {
    // remap the shared readonly space to shared readwrite, private
    FileMapInfo* mapinfo = FileMapInfo::current_info();
    if (!mapinfo->remap_shared_readonly_as_readwrite()) {
      return false;
    }
    _remapped_readwrite = true;
  }
  return true;
}

void MetaspaceShared::report_out_of_space(const char* name, size_t needed_bytes) {
  // This is highly unlikely to happen on 64-bits because we have reserved a 4GB space.
  // On 32-bit we reserve only 256MB so you could run out of space with 100,000 classes
  // or so.
  _mc_region.print_out_of_space_msg(name, needed_bytes);
  _rw_region.print_out_of_space_msg(name, needed_bytes);
  _ro_region.print_out_of_space_msg(name, needed_bytes);
  _md_region.print_out_of_space_msg(name, needed_bytes);
  _od_region.print_out_of_space_msg(name, needed_bytes);

  vm_exit_during_initialization(err_msg("Unable to allocate from '%s' region", name),
                                "Please reduce the number of shared classes.");
}
