metaspaceShared.cpp revision 3730:fb19af007ffc
/*
 * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/dictionary.hpp"
#include "classfile/loaderConstraints.hpp"
#include "classfile/placeholders.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "memory/filemap.hpp"
#include "memory/gcLocker.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceShared.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/signature.hpp"
#include "runtime/vm_operations.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/hashtable.inline.hpp"


int MetaspaceShared::_max_alignment = 0;

ReservedSpace* MetaspaceShared::_shared_rs = NULL;

// Read/write a data stream for restoring/preserving metadata pointers and
// miscellaneous data from/to the shared archive file.

void MetaspaceShared::serialize(SerializeClosure* soc) {
  int tag = 0;
  soc->do_tag(--tag);

  assert(!UseCompressedOops, "UseCompressedOops doesn't work with shared archive");
  // Verify the sizes of various metadata in the system.
  soc->do_tag(sizeof(Method));
  soc->do_tag(sizeof(ConstMethod));
  soc->do_tag(arrayOopDesc::base_offset_in_bytes(T_BYTE));
  soc->do_tag(sizeof(ConstantPool));
  soc->do_tag(sizeof(ConstantPoolCache));
  soc->do_tag(objArrayOopDesc::base_offset_in_bytes());
  soc->do_tag(typeArrayOopDesc::base_offset_in_bytes(T_BYTE));
  soc->do_tag(sizeof(Symbol));

  // Dump/restore miscellaneous metadata.
  Universe::serialize(soc, true);
  soc->do_tag(--tag);

  // Dump/restore references to commonly used names and signatures.
  vmSymbols::serialize(soc);
  soc->do_tag(--tag);

  soc->do_tag(666);
}
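
// For reference, the stream produced/consumed above is, in order: tag -1,
// the eight metadata size/offset sanity words, the Universe misc metadata,
// tag -2, the vmSymbols references, tag -3, and a final 666 end marker.
// ReadClosure::do_tag() (defined below) re-checks every tag on restore, so
// any layout drift between dump time and restore time fails fast.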


// CDS code for dumping shared archive.

// Global object for holding classes that have been loaded.  Since this
// is run at a safepoint just before exit, this is the entire set of classes.
static GrowableArray<Klass*>* _global_klass_objects;
static void collect_classes(Klass* k) {
  _global_klass_objects->append_if_missing(k);
  if (k->oop_is_instance()) {
    // Add in the array classes too
    InstanceKlass* ik = InstanceKlass::cast(k);
    ik->array_klasses_do(collect_classes);
  }
}

static void remove_unshareable_in_classes() {
  for (int i = 0; i < _global_klass_objects->length(); i++) {
    Klass* k = _global_klass_objects->at(i);
    k->remove_unshareable_info();
  }
}

// Walk all methods in the class list and assign a fingerprint
// so that this part of the ConstMethod* is read only.
static void calculate_fingerprints() {
  for (int i = 0; i < _global_klass_objects->length(); i++) {
    Klass* k = _global_klass_objects->at(i);
    if (k->oop_is_instance()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      for (int j = 0; j < ik->methods()->length(); j++) {
        ResourceMark rm;
        Method* m = ik->methods()->at(j);
        (new Fingerprinter(m))->fingerprint();
      }
    }
  }
}

// Patch C++ vtable pointer in metadata.

// Klass and other metadata objects contain references to C++ vtables in the
// JVM library.
// Fix them to point to our constructed vtables.  However, don't iterate
// across the space while doing this, as that causes the vtables to be
// patched, undoing our useful work.  Instead, iterate to make a list,
// then use the list to do the fixing.
//
// Our constructed vtables:
// Dump time:
//  1. init_self_patching_vtbl_list: table of pointers to current virtual method addrs
//  2. generate_vtable_methods: create jump table, appended to above vtbl_list
//  3. patch_klass_vtables: for Klass list, patch the vtable entry in klass and
//     associated metadata to point to jump table rather than to current vtbl
// Table layout: NOTE FIXED SIZE
//   1. vtbl pointers
//   2. #Klass X #virtual methods per Klass
//   1 entry for each, in the order:
//   Klass1:method1 entry, Klass1:method2 entry, ... Klass1:method<num_virtuals> entry
//   Klass2:method1 entry, Klass2:method2 entry, ... Klass2:method<num_virtuals> entry
//   ...
//   Klass<vtbl_list_size>:method1 entry, Klass<vtbl_list_size>:method2 entry,
//       ... Klass<vtbl_list_size>:method<num_virtuals> entry
//  Sample entry: (Sparc):
//   save(sp, -256, sp)
//   ba,pt common_code
//   mov XXX, %L0       %L0 gets: Klass index <<8 + method index (note: max method index 255)
//
// Restore time:
//   1. initialize_shared_space: reserve space for table
//   2. init_self_patching_vtbl_list: update pointers to NEW virtual method addrs in text
//
// Execution time:
//   First virtual method call for any object of these metadata types:
//   1. object->klass
//   2. vtable entry for that klass points to the jump table entries
//   3. branches to common_code with %O0/klass, %L0: Klass index <<8 + method index
//   4. common_code:
//      Get address of new vtbl pointer for this Klass from updated table
//      Update new vtbl pointer in the Klass: future virtual calls go direct
//      Jump to method, using new vtbl pointer and method index
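//
// Worked example of the indexing above: the jump-table entry for vtbl_list
// slot k, virtual method index m, lives at
//   (void**)vtable_start + k * MetaspaceShared::num_virtuals + m
// (see find_matching_vtbl_ptr() below).  The generated stub at that entry
// loads (k << 8) + m before branching to common_code, which looks up the
// real post-restore vtbl pointer for that Klass, installs it in the object,
// and re-dispatches the call through it.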


static void* find_matching_vtbl_ptr(void** vtbl_list, void* new_vtable_start, void* obj) {
  void* old_vtbl_ptr = *(void**)obj;
  for (int i = 0; i < MetaspaceShared::vtbl_list_size; i++) {
    if (vtbl_list[i] == old_vtbl_ptr) {
      return (void**)new_vtable_start + i * MetaspaceShared::num_virtuals;
    }
  }
  ShouldNotReachHere();
  return NULL;
}

// Assumes the vtable is the first slot in the object.
static void patch_klass_vtables(void** vtbl_list, void* new_vtable_start) {
  int n = _global_klass_objects->length();
  for (int i = 0; i < n; i++) {
    Klass* obj = _global_klass_objects->at(i);
    // Note oop_is_instance() is a virtual call.  After patching vtables
    // all virtual calls on the dummy vtables will restore the original!
    if (obj->oop_is_instance()) {
      InstanceKlass* ik = InstanceKlass::cast(obj);
      *(void**)ik = find_matching_vtbl_ptr(vtbl_list, new_vtable_start, ik);
      ConstantPool* cp = ik->constants();
      *(void**)cp = find_matching_vtbl_ptr(vtbl_list, new_vtable_start, cp);
      for (int j = 0; j < ik->methods()->length(); j++) {
        Method* m = ik->methods()->at(j);
        *(void**)m = find_matching_vtbl_ptr(vtbl_list, new_vtable_start, m);
      }
    } else {
      // Array klasses
      Klass* k = obj;
      *(void**)k = find_matching_vtbl_ptr(vtbl_list, new_vtable_start, k);
    }
  }
}

// Closure for serializing initialization data out to a data area to be
// written to the shared file.

class WriteClosure : public SerializeClosure {
private:
  intptr_t* top;
  char* end;

  inline void check_space() {
    if ((char*)top + sizeof(intptr_t) > end) {
      report_out_of_shared_space(SharedMiscData);
    }
  }

public:
  WriteClosure(char* md_top, char* md_end) {
    top = (intptr_t*)md_top;
    end = md_end;
  }

  char* get_top() { return (char*)top; }

  void do_ptr(void** p) {
    check_space();
    *top = (intptr_t)*p;
    ++top;
  }

  void do_tag(int tag) {
    check_space();
    *top = (intptr_t)tag;
    ++top;
  }

  void do_region(u_char* start, size_t size) {
    if ((char*)top + size > end) {
      report_out_of_shared_space(SharedMiscData);
    }
    assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
    assert(size % sizeof(intptr_t) == 0, "bad size");
    do_tag((int)size);
    while (size > 0) {
      *top = *(intptr_t*)start;
      ++top;
      start += sizeof(intptr_t);
      size -= sizeof(intptr_t);
    }
  }

  bool reading() const { return false; }
};
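
// WriteClosure advances md_top through the misc-data (md) region while
// MetaspaceShared::serialize() runs at dump time; ReadClosure (further
// below) consumes the same words from the mapped archive at restore time.
// Both are driven by the same serialize() routine, so the word order in the
// archive always matches what the reader expects.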


// Populate the shared space.

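// The shared ReservedSpace is carved up as [ro | rw | md | mc]:
//   ro - read-only metadata                                 (SharedReadOnlySize)
//   rw - read-write metadata                                (SharedReadWriteSize)
//   md - misc data: vtbl list, generated vtable entries, hashtable
//        buckets/entries, and the serialized misc stream    (SharedMiscDataSize)
//   mc - misc code emitted by generate_vtable_methods()     (SharedMiscCodeSize)
// The constructor below splits off the md and mc pieces; the ro and rw
// pieces are populated through _loader_data->ro_metaspace()/rw_metaspace().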
class VM_PopulateDumpSharedSpace: public VM_Operation {
private:
  ClassLoaderData* _loader_data;
  GrowableArray<Klass*> *_class_promote_order;
  VirtualSpace _md_vs;
  VirtualSpace _mc_vs;

public:
  VM_PopulateDumpSharedSpace(ClassLoaderData* loader_data,
                             GrowableArray<Klass*> *class_promote_order) :
    _loader_data(loader_data) {

    // Split up and initialize the misc code and data spaces
    ReservedSpace* shared_rs = MetaspaceShared::shared_rs();
    int metadata_size = SharedReadOnlySize+SharedReadWriteSize;
    ReservedSpace shared_ro_rw = shared_rs->first_part(metadata_size);
    ReservedSpace misc_section = shared_rs->last_part(metadata_size);

    // Now split into misc sections.
    ReservedSpace md_rs   = misc_section.first_part(SharedMiscDataSize);
    ReservedSpace mc_rs   = misc_section.last_part(SharedMiscDataSize);
    _md_vs.initialize(md_rs, SharedMiscDataSize);
    _mc_vs.initialize(mc_rs, SharedMiscCodeSize);
    _class_promote_order = class_promote_order;
  }

  VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; }
  void doit();   // outline because gdb sucks
}; // class VM_PopulateDumpSharedSpace


void VM_PopulateDumpSharedSpace::doit() {
  Thread* THREAD = VMThread::vm_thread();
  NOT_PRODUCT(SystemDictionary::verify();)
  // The following guarantee is meant to ensure that no loader constraints
  // exist yet, since the constraints table is not shared.  This becomes
  // more important now that we don't re-initialize vtables/itables for
  // shared classes at runtime, where constraints were previously created.
  guarantee(SystemDictionary::constraints()->number_of_entries() == 0,
            "loader constraints are not saved");
  guarantee(SystemDictionary::placeholders()->number_of_entries() == 0,
            "placeholders are not saved");
  // Revisit and implement this if we prelink method handle call sites:
  guarantee(SystemDictionary::invoke_method_table() == NULL ||
            SystemDictionary::invoke_method_table()->number_of_entries() == 0,
            "invoke method table is not saved");

  // At this point, many classes have been loaded.
  // Gather SystemDictionary classes in a global array so that later passes
  // can work from the array instead of walking the SystemDictionary again.
  _global_klass_objects = new GrowableArray<Klass*>(1000);
  Universe::basic_type_classes_do(collect_classes);
  SystemDictionary::classes_do(collect_classes);

  tty->print_cr("Number of classes %d", _global_klass_objects->length());

  // Update all the fingerprints in the shared methods.
  tty->print("Calculating fingerprints ... ");
  calculate_fingerprints();
  tty->print_cr("done. ");

  // Remove all references outside the metadata
  tty->print("Removing unshareable information ... ");
  remove_unshareable_in_classes();
  tty->print_cr("done. ");

  // Set up the shared data and shared code segments.
  char* md_low = _md_vs.low();
  char* md_top = md_low;
  char* md_end = _md_vs.high();
  char* mc_low = _mc_vs.low();
  char* mc_top = mc_low;
  char* mc_end = _mc_vs.high();

  // Reserve space for the list of Klass*s whose vtables are used
  // for patching others as needed.

  void** vtbl_list = (void**)md_top;
  int vtbl_list_size = MetaspaceShared::vtbl_list_size;
  Universe::init_self_patching_vtbl_list(vtbl_list, vtbl_list_size);

  md_top += vtbl_list_size * sizeof(void*);
  void* vtable = md_top;

  // Reserve space for a new dummy vtable for klass objects in the
  // heap.  Generate self-patching vtable entries.

  MetaspaceShared::generate_vtable_methods(vtbl_list, &vtable,
                                     &md_top, md_end,
                                     &mc_top, mc_end);

  // Reorder the system dictionary.  (Moving the symbols affects
  // how the hash table indices are calculated.)

  SystemDictionary::reorder_dictionary();

  NOT_PRODUCT(SystemDictionary::verify();)

  // Copy the symbol table and the system dictionary to the shared
  // space in usable form.  Copy the hashtable
  // buckets first [read-write], then copy the linked lists of entries
  // [read-only].

  SymbolTable::reverse(md_top);
  NOT_PRODUCT(SymbolTable::verify());
  SymbolTable::copy_buckets(&md_top, md_end);

  SystemDictionary::reverse();
  SystemDictionary::copy_buckets(&md_top, md_end);

  ClassLoader::verify();
  ClassLoader::copy_package_info_buckets(&md_top, md_end);
  ClassLoader::verify();

  SymbolTable::copy_table(&md_top, md_end);
  SystemDictionary::copy_table(&md_top, md_end);
  ClassLoader::verify();
  ClassLoader::copy_package_info_table(&md_top, md_end);
  ClassLoader::verify();
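  // At this point the md region contains, in order: the vtbl_list, the
  // generated vtable entries, and the bucket arrays and entry blocks copied
  // above for the symbol table, shared dictionary and package info table.
  // The serialized stream written next is the final piece; restore time
  // (initialize_shared_spaces) walks the region in exactly this order.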

  // Write the other data to the output array.
  WriteClosure wc(md_top, md_end);
  MetaspaceShared::serialize(&wc);
  md_top = wc.get_top();

  // Print the shared space usage unconditionally.
  const char* fmt = "%s space: " PTR_FORMAT " out of " PTR_FORMAT " words allocated at " PTR_FORMAT ".";
  Metaspace* ro_space = _loader_data->ro_metaspace();
  Metaspace* rw_space = _loader_data->rw_metaspace();
  tty->print_cr(fmt, "ro", ro_space->used_words(Metaspace::NonClassType),
                ro_space->capacity_words(Metaspace::NonClassType),
                ro_space->bottom());
  tty->print_cr(fmt, "rw", rw_space->used_words(Metaspace::NonClassType),
                rw_space->capacity_words(Metaspace::NonClassType),
                rw_space->bottom());
  tty->print_cr(fmt, "md", md_top - md_low, md_end-md_low, md_low);
  tty->print_cr(fmt, "mc", mc_top - mc_low, mc_end-mc_low, mc_low);

  // Update the vtable pointers in all of the Klass objects in the
  // heap. They should point to the newly generated vtables.
  patch_klass_vtables(vtbl_list, vtable);

  // Save the vtbl_list and zero it out before the regions are written, so
  // the archived copy holds no live (process-specific) C++ vtable pointers;
  // the list is restored from the saved copy after the file has been written.
  char* saved_vtbl = (char*)os::malloc(vtbl_list_size * sizeof(void*), mtClass);
  memmove(saved_vtbl, vtbl_list, vtbl_list_size * sizeof(void*));
  memset(vtbl_list, 0, vtbl_list_size * sizeof(void*));

  // Create and write the archive file that maps the shared spaces.

  FileMapInfo* mapinfo = new FileMapInfo();
  mapinfo->populate_header(MetaspaceShared::max_alignment());

  // Pass 1 - update file offsets in header.
  mapinfo->write_header();
  mapinfo->write_space(MetaspaceShared::ro, _loader_data->ro_metaspace(), true);
  mapinfo->write_space(MetaspaceShared::rw, _loader_data->rw_metaspace(), false);
  mapinfo->write_region(MetaspaceShared::md, _md_vs.low(),
                        pointer_delta(md_top, _md_vs.low(), sizeof(char)),
                        SharedMiscDataSize,
                        false, false);
  mapinfo->write_region(MetaspaceShared::mc, _mc_vs.low(),
                        pointer_delta(mc_top, _mc_vs.low(), sizeof(char)),
                        SharedMiscCodeSize,
                        true, true);

  // Pass 2 - write data.
  mapinfo->open_for_write();
  mapinfo->write_header();
  mapinfo->write_space(MetaspaceShared::ro, _loader_data->ro_metaspace(), true);
  mapinfo->write_space(MetaspaceShared::rw, _loader_data->rw_metaspace(), false);
  mapinfo->write_region(MetaspaceShared::md, _md_vs.low(),
                        pointer_delta(md_top, _md_vs.low(), sizeof(char)),
                        SharedMiscDataSize,
                        false, false);
  mapinfo->write_region(MetaspaceShared::mc, _mc_vs.low(),
                        pointer_delta(mc_top, _mc_vs.low(), sizeof(char)),
                        SharedMiscCodeSize,
                        true, true);
  mapinfo->close();

  memmove(vtbl_list, saved_vtbl, vtbl_list_size * sizeof(void*));
}

static void link_shared_classes(Klass* obj, TRAPS) {
  Klass* k = Klass::cast(obj);
  if (k->oop_is_instance()) {
    InstanceKlass* ik = (InstanceKlass*) k;
    // Link the class to cause the bytecodes to be rewritten and the
    // cpcache to be created.
    if (ik->init_state() < InstanceKlass::linked) {
      ik->link_class(THREAD);
      guarantee(!HAS_PENDING_EXCEPTION, "exception in class rewriting");
    }
  }
}


// Support for a simple checksum of the contents of the class list
// file to prevent trivial tampering. The algorithm matches that in
// the MakeClassList program used by the J2SE build process.
#define JSUM_SEED ((jlong)CONST64(0xcafebabebabecafe))
static jlong
jsum(jlong start, const char *buf, const int len)
{
    jlong h = start;
    char *p = (char *)buf, *e = p + len;
    while (p < e) {
        char c = *p++;
        if (c <= ' ') {
            /* Skip spaces and control characters */
            continue;
        }
        h = 31 * h + c;
    }
    return h;
}
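
// Example (value illustrative): for a classlist line such as
// "java/lang/Object" the running checksum is updated as h = 31*h + c for
// every character above ' ', starting from JSUM_SEED.  The list's last line
// records the expected 64-bit sum as 16 hex digits, e.g. "# 0123456789abcdef",
// which preload_and_dump() below parses back out with sscanf.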

// Preload classes from a list, populate the shared spaces and dump to a
// file.
void MetaspaceShared::preload_and_dump(TRAPS) {
  TraceTime timer("Dump Shared Spaces", TraceStartupTime);
  ResourceMark rm;

  // Lock out GC - is it necessary? I don't think we care.
  No_GC_Verifier no_gc;

  // Preload classes to be shared.
  // Should use some os:: method rather than fopen() here. aB.
  // Construct the path to the class list (in jre/lib)
  // Walk up two directories from the location of the VM and
  // optionally tack on "lib" (depending on platform)
  char class_list_path[JVM_MAXPATHLEN];
  os::jvm_path(class_list_path, sizeof(class_list_path));
  for (int i = 0; i < 3; i++) {
    char *end = strrchr(class_list_path, *os::file_separator());
    if (end != NULL) *end = '\0';
  }
  int class_list_path_len = (int)strlen(class_list_path);
  if (class_list_path_len >= 3) {
    if (strcmp(class_list_path + class_list_path_len - 3, "lib") != 0) {
      strcat(class_list_path, os::file_separator());
      strcat(class_list_path, "lib");
    }
  }
  strcat(class_list_path, os::file_separator());
  strcat(class_list_path, "classlist");
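
  // Example (paths illustrative): if os::jvm_path() returned
  // /opt/jdk/jre/lib/amd64/server/libjvm.so, stripping three trailing path
  // components leaves /opt/jdk/jre/lib, which already ends in "lib", so the
  // resulting path is /opt/jdk/jre/lib/classlist.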

  FILE* file = fopen(class_list_path, "r");
  if (file != NULL) {
    jlong computed_jsum  = JSUM_SEED;
    jlong file_jsum      = 0;

    char class_name[256];
    int class_count = 0;
    GrowableArray<Klass*>* class_promote_order = new GrowableArray<Klass*>();

    // sun.io.Converters
    static const char obj_array_sig[] = "[[Ljava/lang/Object;";
    SymbolTable::new_permanent_symbol(obj_array_sig, THREAD);

    // java.util.HashMap
    static const char map_entry_array_sig[] = "[Ljava/util/Map$Entry;";
    SymbolTable::new_permanent_symbol(map_entry_array_sig, THREAD);

    tty->print("Loading classes to share ... ");
    while ((fgets(class_name, sizeof class_name, file)) != NULL) {
      if (*class_name == '#') {
        jint fsh, fsl;
        if (sscanf(class_name, "# %8x%8x\n", &fsh, &fsl) == 2) {
          file_jsum = ((jlong)(fsh) << 32) | (fsl & 0xffffffff);
        }

        continue;
      }
      // Remove trailing newline
      size_t name_len = strlen(class_name);
      class_name[name_len-1] = '\0';

      computed_jsum = jsum(computed_jsum, class_name, (const int)name_len - 1);

      // Got a class name - load it.
      TempNewSymbol class_name_symbol = SymbolTable::new_permanent_symbol(class_name, THREAD);
      guarantee(!HAS_PENDING_EXCEPTION, "Exception creating a symbol.");
      Klass* klass = SystemDictionary::resolve_or_null(class_name_symbol,
                                                         THREAD);
      guarantee(!HAS_PENDING_EXCEPTION, "Exception resolving a class.");
      if (klass != NULL) {
        if (PrintSharedSpaces && Verbose && WizardMode) {
          tty->print_cr("Shared spaces preloaded: %s", class_name);
        }

        InstanceKlass* ik = InstanceKlass::cast(klass);

        // Should be class load order as per -XX:+TraceClassLoadingPreorder
        class_promote_order->append(ik);

        // Link the class to cause the bytecodes to be rewritten and the
        // cpcache to be created. The linking is done as soon as classes
        // are loaded in order that the related data structures (klass and
        // cpCache) are located together.

        if (ik->init_state() < InstanceKlass::linked) {
          ik->link_class(THREAD);
          guarantee(!(HAS_PENDING_EXCEPTION), "exception in class rewriting");
        }

        // TODO: Resolve klasses in constant pool
        ik->constants()->resolve_class_constants(THREAD);

        class_count++;
      } else {
        if (PrintSharedSpaces && Verbose && WizardMode) {
          tty->cr();
          tty->print_cr(" Preload failed: %s", class_name);
        }
      }
      file_jsum = 0; // Checksum must be on last line of file
    }
    if (computed_jsum != file_jsum) {
      tty->cr();
      tty->print_cr("Preload failed: checksum of class list was incorrect.");
      exit(1);
    }

    tty->print_cr("done. ");

    if (PrintSharedSpaces) {
      tty->print_cr("Shared spaces: preloaded %d classes", class_count);
    }

    // Rewrite and link classes.
    tty->print("Rewriting and linking classes ... ");

    // Link any classes which got missed.  (It's not quite clear why
    // they got missed.)  This iteration would be unsafe if we weren't
    // single-threaded at this point; however we can't do it on the VM
    // thread because it requires object allocation.
    SystemDictionary::classes_do(link_shared_classes, CATCH);
    tty->print_cr("done. ");

    // Create and dump the shared spaces.  Everything so far is loaded
    // with the null class loader.
    ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
    VM_PopulateDumpSharedSpace op(loader_data, class_promote_order);
    VMThread::execute(&op);

  } else {
    char errmsg[JVM_MAXPATHLEN];
    os::lasterror(errmsg, JVM_MAXPATHLEN);
    tty->print_cr("Loading classlist failed: %s", errmsg);
    exit(1);
  }

  // Since various initialization steps have been undone by this process,
  // it is not reasonable to continue running a java process.
  exit(0);
}
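
// Note: preload_and_dump() is only reached when the VM is started in dump
// mode (-Xshare:dump), and it always terminates the process - exit(0) on
// success, exit(1) on failure - because, as noted above, the dumping VM is
// no longer in a state suitable for running ordinary Java code.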


// Closure for serializing initialization data in from a data area
// (ptr_array) read from the shared file.

class ReadClosure : public SerializeClosure {
private:
  intptr_t** _ptr_array;

  inline intptr_t nextPtr() {
    return *(*_ptr_array)++;
  }

public:
  ReadClosure(intptr_t** ptr_array) { _ptr_array = ptr_array; }

  void do_ptr(void** p) {
    assert(*p == NULL, "initializing previously initialized pointer.");
    intptr_t obj = nextPtr();
    assert((intptr_t)obj >= 0 || (intptr_t)obj < -100,
           "hit tag while initializing ptrs.");
    *p = (void*)obj;
  }

  void do_tag(int tag) {
    int old_tag;
    old_tag = (int)(intptr_t)nextPtr();
    // do_int(&old_tag);
    assert(tag == old_tag, "old tag doesn't match");
    FileMapInfo::assert_mark(tag == old_tag);
  }

  void do_region(u_char* start, size_t size) {
    assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
    assert(size % sizeof(intptr_t) == 0, "bad size");
    do_tag((int)size);
    while (size > 0) {
      *(intptr_t*)start = nextPtr();
      start += sizeof(intptr_t);
      size -= sizeof(intptr_t);
    }
  }

  bool reading() const { return true; }
};


// Save bounds of shared spaces mapped in.
static char* _ro_base = NULL;
static char* _rw_base = NULL;
static char* _md_base = NULL;
static char* _mc_base = NULL;

// Return true if given address is in the mapped shared space.
bool MetaspaceShared::is_in_shared_space(const void* p) {
  if (_ro_base == NULL || _rw_base == NULL) {
    return false;
  } else {
    return ((p > _ro_base && p < (_ro_base + SharedReadOnlySize)) ||
            (p > _rw_base && p < (_rw_base + SharedReadWriteSize)));
  }
}

void MetaspaceShared::print_shared_spaces() {
  gclog_or_tty->print_cr("Shared Spaces:");
  gclog_or_tty->print("  read-only " INTPTR_FORMAT "-" INTPTR_FORMAT,
    _ro_base, _ro_base + SharedReadOnlySize);
  gclog_or_tty->print("  read-write " INTPTR_FORMAT "-" INTPTR_FORMAT,
    _rw_base, _rw_base + SharedReadWriteSize);
  gclog_or_tty->cr();
  gclog_or_tty->print("  misc-data " INTPTR_FORMAT "-" INTPTR_FORMAT,
    _md_base, _md_base + SharedMiscDataSize);
  gclog_or_tty->print("  misc-code " INTPTR_FORMAT "-" INTPTR_FORMAT,
    _mc_base, _mc_base + SharedMiscCodeSize);
  gclog_or_tty->cr();
}


// Map shared spaces at requested addresses and return whether it succeeded.
// Need to keep the bounds of the ro and rw space for the Metaspace::contains
// call, or is_in_shared_space.
bool MetaspaceShared::map_shared_spaces(FileMapInfo* mapinfo) {
  size_t image_alignment = mapinfo->alignment();

  // Map in the shared memory and then map the regions on top of it
  ReservedSpace shared_rs = mapinfo->reserve_shared_memory();
  if (!shared_rs.is_reserved()) return false;

  // Split reserved memory into pieces (Windows needs this)
  ReservedSpace ro_rs   = shared_rs.first_part(SharedReadOnlySize);
  ReservedSpace tmp_rs1 = shared_rs.last_part(SharedReadOnlySize);
  ReservedSpace rw_rs   = tmp_rs1.first_part(SharedReadWriteSize);
  ReservedSpace tmp_rs2 = tmp_rs1.last_part(SharedReadWriteSize);
  ReservedSpace md_rs   = tmp_rs2.first_part(SharedMiscDataSize);
  ReservedSpace mc_rs   = tmp_rs2.last_part(SharedMiscDataSize);
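
  // The splits above mirror the dump-time layout: ro at the base of the
  // reserved range, then rw, md and mc at their fixed Shared*Size offsets,
  // so that map_region() below can map each archived region at the address
  // the dump assumed.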

  // Map each shared region
  if ((_ro_base = mapinfo->map_region(ro)) != NULL &&
      (_rw_base = mapinfo->map_region(rw)) != NULL &&
      (_md_base = mapinfo->map_region(md)) != NULL &&
      (_mc_base = mapinfo->map_region(mc)) != NULL &&
      (image_alignment == (size_t)max_alignment())) {
    // Success (no need to do anything)
    return true;
  } else {
    // If there was a failure in mapping any of the spaces, unmap the ones
    // that succeeded
    if (_ro_base != NULL) mapinfo->unmap_region(ro);
    if (_rw_base != NULL) mapinfo->unmap_region(rw);
    if (_md_base != NULL) mapinfo->unmap_region(md);
    if (_mc_base != NULL) mapinfo->unmap_region(mc);
    // Release the entire mapped region
    shared_rs.release();
    // If -Xshare:on is specified, print out the error message and exit VM,
    // otherwise, set UseSharedSpaces to false and continue.
    if (RequireSharedSpaces) {
      vm_exit_during_initialization("Unable to use shared archive.", NULL);
    } else {
      FLAG_SET_DEFAULT(UseSharedSpaces, false);
    }
    return false;
  }
}

// Read the miscellaneous data from the shared file, and
// serialize it out to its various destinations.

void MetaspaceShared::initialize_shared_spaces() {
  FileMapInfo *mapinfo = FileMapInfo::current_info();

  char* buffer = mapinfo->region_base(md);

  // Skip over (reserve space for) a list of addresses of C++ vtables
  // for Klass objects.  They get filled in later.

  void** vtbl_list = (void**)buffer;
  buffer += MetaspaceShared::vtbl_list_size * sizeof(void*);
  Universe::init_self_patching_vtbl_list(vtbl_list, vtbl_list_size);

  // Skip over (reserve space for) dummy C++ vtables of Klass objects.
  // They are used as is.

  intptr_t vtable_size = *(intptr_t*)buffer;
  buffer += sizeof(intptr_t);
  buffer += vtable_size;

  // Create the symbol table using the bucket array at this spot in the
  // misc data space.  Since the symbol table is often modified, this
  // region (of mapped pages) will be copy-on-write.

  int symbolTableLen = *(intptr_t*)buffer;
  buffer += sizeof(intptr_t);
  int number_of_entries = *(intptr_t*)buffer;
  buffer += sizeof(intptr_t);
  SymbolTable::create_table((HashtableBucket<mtSymbol>*)buffer, symbolTableLen,
                            number_of_entries);
  buffer += symbolTableLen;

  // Create the shared dictionary using the bucket array at this spot in
  // the misc data space.  Since the shared dictionary table is never
  // modified, this region (of mapped pages) will be (effectively, if
  // not explicitly) read-only.

  int sharedDictionaryLen = *(intptr_t*)buffer;
  buffer += sizeof(intptr_t);
  number_of_entries = *(intptr_t*)buffer;
  buffer += sizeof(intptr_t);
  SystemDictionary::set_shared_dictionary((HashtableBucket<mtClass>*)buffer,
                                          sharedDictionaryLen,
                                          number_of_entries);
  buffer += sharedDictionaryLen;

  // Create the package info table using the bucket array at this spot in
  // the misc data space.  Since the package info table is never
  // modified, this region (of mapped pages) will be (effectively, if
  // not explicitly) read-only.

  int pkgInfoLen = *(intptr_t*)buffer;
  buffer += sizeof(intptr_t);
  number_of_entries = *(intptr_t*)buffer;
  buffer += sizeof(intptr_t);
  ClassLoader::create_package_info_table((HashtableBucket<mtClass>*)buffer, pkgInfoLen,
                                         number_of_entries);
  buffer += pkgInfoLen;
  ClassLoader::verify();

  // The following data in the shared misc data region are the linked
  // list elements (HashtableEntry objects) for the symbol table, string
  // table, and shared dictionary.  The heap objects referred to by the
  // symbol table, string table, and shared dictionary are permanent and
  // unmovable.  Since new entries added to the string and symbol tables
  // are always added at the beginning of the linked lists, THESE LINKED
  // LIST ELEMENTS ARE READ-ONLY.

  int len = *(intptr_t*)buffer; // skip over symbol table entries
  buffer += sizeof(intptr_t);
  buffer += len;

  len = *(intptr_t*)buffer;     // skip over shared dictionary entries
  buffer += sizeof(intptr_t);
  buffer += len;

  len = *(intptr_t*)buffer;     // skip over package info table entries
  buffer += sizeof(intptr_t);
  buffer += len;

  len = *(intptr_t*)buffer;     // skip over package info table char[] arrays.
  buffer += sizeof(intptr_t);
  buffer += len;

  intptr_t* array = (intptr_t*)buffer;
  ReadClosure rc(&array);
  serialize(&rc);

  // Close the mapinfo file
  mapinfo->close();
}

// JVM/TI RedefineClasses() support:
bool MetaspaceShared::remap_shared_readonly_as_readwrite() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  if (UseSharedSpaces) {
    // remap the shared readonly space to shared readwrite, private
    FileMapInfo* mapinfo = FileMapInfo::current_info();
    if (!mapinfo->remap_shared_readonly_as_readwrite()) {
      return false;
    }
  }
  return true;
}