// metaspaceShared.cpp revision 3602:da91efe96a93
/*
 * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/dictionary.hpp"
#include "classfile/loaderConstraints.hpp"
#include "classfile/placeholders.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "memory/filemap.hpp"
#include "memory/gcLocker.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceShared.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/signature.hpp"
#include "runtime/vm_operations.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/hashtable.inline.hpp"


int MetaspaceShared::_max_alignment = 0;
int MetaspaceShared::max_alignment()                   { return _max_alignment; }
void MetaspaceShared::set_max_alignment(int alignment) { _max_alignment = alignment; }

// Accessor functions to save shared space created for metadata, which has
// extra space allocated at the end for miscellaneous data and code.
ReservedSpace* MetaspaceShared::_shared_rs = NULL;
ReservedSpace* MetaspaceShared::shared_rs()            { return _shared_rs; }
void MetaspaceShared::set_shared_rs(ReservedSpace* rs) { _shared_rs = rs; }

// Read/write a data stream for restoring/preserving metadata pointers and
// miscellaneous data from/to the shared archive file.
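//
// The same serialize() routine is driven by a WriteClosure at dump time and
// by a ReadClosure at restore time, so the two streams stay in sync.  The
// do_tag() calls (including the sizeof() checks below) act as consistency
// markers: ReadClosure verifies each tag against the value that was dumped.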

void MetaspaceShared::serialize(SerializeClosure* soc) {
  int tag = 0;
  soc->do_tag(--tag);

  assert(!UseCompressedOops, "UseCompressedOops doesn't work with shared archive");
  // Verify the sizes of various metadata in the system.
  soc->do_tag(sizeof(Method));
  soc->do_tag(sizeof(ConstMethod));
  soc->do_tag(arrayOopDesc::base_offset_in_bytes(T_BYTE));
  soc->do_tag(sizeof(ConstantPool));
  soc->do_tag(sizeof(ConstantPoolCache));
  soc->do_tag(objArrayOopDesc::base_offset_in_bytes());
  soc->do_tag(typeArrayOopDesc::base_offset_in_bytes(T_BYTE));
  soc->do_tag(sizeof(Symbol));

  // Dump/restore miscellaneous metadata.
  Universe::serialize(soc, true);
  soc->do_tag(--tag);

  // Dump/restore references to commonly used names and signatures.
  vmSymbols::serialize(soc);
  soc->do_tag(--tag);

  soc->do_tag(666);
}


// CDS code for dumping shared archive.

// Global object for holding classes that have been loaded.  Since this
// is run at a safepoint just before exit, this is the entire set of classes.
static GrowableArray<Klass*>* _global_klass_objects;
static void collect_classes(Klass* k) {
  _global_klass_objects->append_if_missing(k);
  if (k->oop_is_instance()) {
    // Add in the array classes too
    InstanceKlass* ik = InstanceKlass::cast(k);
    ik->array_klasses_do(collect_classes);
  }
}

static void remove_unshareable_in_classes() {
  for (int i = 0; i < _global_klass_objects->length(); i++) {
    Klass* k = _global_klass_objects->at(i);
    k->remove_unshareable_info();
  }
}

// Walk all methods in the class list and assign a fingerprint
// so that this part of the ConstMethod* is read only.
static void calculate_fingerprints() {
  for (int i = 0; i < _global_klass_objects->length(); i++) {
    Klass* k = _global_klass_objects->at(i);
    if (k->oop_is_instance()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      for (int j = 0; j < ik->methods()->length(); j++) {
        ResourceMark rm;
        Method* m = ik->methods()->at(j);
        (new Fingerprinter(m))->fingerprint();
      }
    }
  }
}

// Patch C++ vtable pointer in metadata.

// Klass and other metadata objects contain references to C++ vtables in the
// JVM library.
// Fix them to point to our constructed vtables.  However, don't iterate
// across the space while doing this, as that causes the vtables to be
// patched, undoing our useful work.  Instead, iterate to make a list,
// then use the list to do the fixing.
//
// Our constructed vtables:
// Dump time:
//  1. init_self_patching_vtbl_list: table of pointers to current virtual method addrs
//  2. generate_vtable_methods: create jump table, appended to above vtbl_list
//  3. patch_klass_vtables: for Klass list, patch the vtable entry in klass and
//     associated metadata to point to jump table rather than to current vtbl
// Table layout: NOTE FIXED SIZE
//   1. vtbl pointers
//   2. #Klass X #virtual methods per Klass
//   1 entry for each, in the order:
//   Klass1:method1 entry, Klass1:method2 entry, ... Klass1:method<num_virtuals> entry
//   Klass2:method1 entry, Klass2:method2 entry, ... Klass2:method<num_virtuals> entry
//   ...
//   Klass<vtbl_list_size>:method1 entry, Klass<vtbl_list_size>:method2 entry,
//       ... Klass<vtbl_list_size>:method<num_virtuals> entry
//  Sample entry: (Sparc):
//   save(sp, -256, sp)
//   ba,pt common_code
//   mov XXX, %L0       %L0 gets: Klass index <<8 + method index (note: max method index 255)
//
// Restore time:
//   1. initialize_shared_space: reserve space for table
//   2. init_self_patching_vtbl_list: update pointers to NEW virtual method addrs in text
//
// Execution time:
//   First virtual method call for any object of these metadata types:
//   1. object->klass->klass_part
//   2. vtable entry for that klass_part points to the jump table entries
//   3. branches to common_code with %O0/klass_part, %L0: Klass index <<8 + method index
//   4. common_code:
//      Get address of new vtbl pointer for this Klass from updated table
//      Update new vtbl pointer in the Klass: future virtual calls go direct
//      Jump to method, using new vtbl pointer and method index


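// Given an object whose first word is a C++ vtable pointer, locate that
// pointer in vtbl_list and return the address of the corresponding block of
// self-patching entries: block i starts at
//   (void**)new_vtable_start + i * MetaspaceShared::num_virtuals
// (one fixed-size block of num_virtuals entries per listed vtable).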
static void* find_matching_vtbl_ptr(void** vtbl_list, void* new_vtable_start, void* obj) {
  void* old_vtbl_ptr = *(void**)obj;
  for (int i = 0; i < MetaspaceShared::vtbl_list_size; i++) {
    if (vtbl_list[i] == old_vtbl_ptr) {
      return (void**)new_vtable_start + i * MetaspaceShared::num_virtuals;
    }
  }
  ShouldNotReachHere();
  return NULL;
}

// Assumes the vtable is in first slot in object.
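// Three kinds of metadata carry C++ vtables and are patched here: the Klass
// itself, its ConstantPool, and each of its Methods (array klasses only need
// the Klass itself patched).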
static void patch_klass_vtables(void** vtbl_list, void* new_vtable_start) {
  int n = _global_klass_objects->length();
  for (int i = 0; i < n; i++) {
    Klass* obj = _global_klass_objects->at(i);
    // Note oop_is_instance() is a virtual call.  After patching vtables
    // all virtual calls on the dummy vtables will restore the original!
    if (obj->oop_is_instance()) {
      InstanceKlass* ik = InstanceKlass::cast(obj);
      *(void**)ik = find_matching_vtbl_ptr(vtbl_list, new_vtable_start, ik);
      ConstantPool* cp = ik->constants();
      *(void**)cp = find_matching_vtbl_ptr(vtbl_list, new_vtable_start, cp);
      for (int j = 0; j < ik->methods()->length(); j++) {
        Method* m = ik->methods()->at(j);
        *(void**)m = find_matching_vtbl_ptr(vtbl_list, new_vtable_start, m);
      }
    } else {
      // Array klasses
      Klass* k = obj;
      *(void**)k = find_matching_vtbl_ptr(vtbl_list, new_vtable_start, k);
    }
  }
}

// Closure for serializing initialization data out to a data area to be
// written to the shared file.

class WriteClosure : public SerializeClosure {
private:
  intptr_t* top;
  char* end;

  inline void check_space() {
    if ((char*)top + sizeof(intptr_t) > end) {
      report_out_of_shared_space(SharedMiscData);
    }
  }

public:
  WriteClosure(char* md_top, char* md_end) {
    top = (intptr_t*)md_top;
    end = md_end;
  }

  char* get_top() { return (char*)top; }

  void do_ptr(void** p) {
    check_space();
    *top = (intptr_t)*p;
    ++top;
  }

  void do_tag(int tag) {
    check_space();
    *top = (intptr_t)tag;
    ++top;
  }

  void do_region(u_char* start, size_t size) {
    if ((char*)top + size > end) {
      report_out_of_shared_space(SharedMiscData);
    }
    assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
    assert(size % sizeof(intptr_t) == 0, "bad size");
    do_tag((int)size);
    while (size > 0) {
      *top = *(intptr_t*)start;
      ++top;
      start += sizeof(intptr_t);
      size -= sizeof(intptr_t);
    }
  }

  bool reading() const { return false; }
};
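
// The stream produced by WriteClosure is consumed by ReadClosure (below) at
// restore time: pointers and tags are written as single intptr_t words, and
// do_region() writes the region size as a tag followed by the raw words.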


// Populate the shared space.
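//
// The shared ReservedSpace is carved into four consecutive pieces:
//   [ read-only metadata (ro) | read-write metadata (rw) | misc data (md) | misc code (mc) ]
// The ro/rw pieces are filled through the null class loader's ro/rw
// metaspaces; the md/mc pieces are managed below as VirtualSpaces.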

class VM_PopulateDumpSharedSpace: public VM_Operation {
private:
  ClassLoaderData* _loader_data;
  GrowableArray<Klass*> *_class_promote_order;
  VirtualSpace _md_vs;
  VirtualSpace _mc_vs;

public:
  VM_PopulateDumpSharedSpace(ClassLoaderData* loader_data,
                             GrowableArray<Klass*> *class_promote_order) :
    _loader_data(loader_data) {

    // Split up and initialize the misc code and data spaces
    ReservedSpace* shared_rs = MetaspaceShared::shared_rs();
    int metadata_size = SharedReadOnlySize+SharedReadWriteSize;
    ReservedSpace shared_ro_rw = shared_rs->first_part(metadata_size);
    ReservedSpace misc_section = shared_rs->last_part(metadata_size);

    // Now split into misc sections.
    ReservedSpace md_rs   = misc_section.first_part(SharedMiscDataSize);
    ReservedSpace mc_rs   = misc_section.last_part(SharedMiscDataSize);
    _md_vs.initialize(md_rs, SharedMiscDataSize);
    _mc_vs.initialize(mc_rs, SharedMiscCodeSize);
    _class_promote_order = class_promote_order;
  }

  VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; }
  void doit();   // outline because gdb sucks
}; // class VM_PopulateDumpSharedSpace


void VM_PopulateDumpSharedSpace::doit() {
  Thread* THREAD = VMThread::vm_thread();
  NOT_PRODUCT(SystemDictionary::verify();)
  // The following guarantee is meant to ensure that no loader constraints
  // exist yet, since the constraints table is not shared.  This becomes
  // more important now that we don't re-initialize vtables/itables for
  // shared classes at runtime, where constraints were previously created.
  guarantee(SystemDictionary::constraints()->number_of_entries() == 0,
            "loader constraints are not saved");
  guarantee(SystemDictionary::placeholders()->number_of_entries() == 0,
            "placeholders are not saved");
  // Revisit and implement this if we prelink method handle call sites:
  guarantee(SystemDictionary::invoke_method_table() == NULL ||
            SystemDictionary::invoke_method_table()->number_of_entries() == 0,
            "invoke method table is not saved");

  // At this point, many classes have been loaded.
  // Gather SystemDictionary classes in a global array and operate on that,
  // so we don't have to walk the SystemDictionary again.
  _global_klass_objects = new GrowableArray<Klass*>(1000);
  Universe::basic_type_classes_do(collect_classes);
  SystemDictionary::classes_do(collect_classes);

  tty->print_cr("Number of classes %d", _global_klass_objects->length());

  // Update all the fingerprints in the shared methods.
  tty->print("Calculating fingerprints ... ");
  calculate_fingerprints();
  tty->print_cr("done. ");

  // Remove all references outside the metadata
  tty->print("Removing unshareable information ... ");
  remove_unshareable_in_classes();
  tty->print_cr("done. ");

  // Set up the shared data and shared code segments.
  char* md_low = _md_vs.low();
  char* md_top = md_low;
  char* md_end = _md_vs.high();
  char* mc_low = _mc_vs.low();
  char* mc_top = mc_low;
  char* mc_end = _mc_vs.high();

  // Reserve space for the list of Klass*s whose vtables are used
  // for patching others as needed.

  void** vtbl_list = (void**)md_top;
  int vtbl_list_size = MetaspaceShared::vtbl_list_size;
  Universe::init_self_patching_vtbl_list(vtbl_list, vtbl_list_size);

  md_top += vtbl_list_size * sizeof(void*);
  void* vtable = md_top;

  // Reserve space for a new dummy vtable for klass objects in the
  // heap.  Generate self-patching vtable entries.

  MetaspaceShared::generate_vtable_methods(vtbl_list, &vtable,
                                     &md_top, md_end,
                                     &mc_top, mc_end);

  // Reorder the system dictionary.  (Moving the symbols affects
  // how the hash table indices are calculated.)
  // Not doing this either.

  SystemDictionary::reorder_dictionary();

  NOT_PRODUCT(SystemDictionary::verify();)

  // Copy the symbol table and the system dictionary to the shared space in
  // usable form.  Copy the hashtable buckets first [read-write], then copy
  // the linked lists of entries [read-only].

  SymbolTable::reverse(md_top);
  NOT_PRODUCT(SymbolTable::verify());
  SymbolTable::copy_buckets(&md_top, md_end);

  SystemDictionary::reverse();
  SystemDictionary::copy_buckets(&md_top, md_end);

  ClassLoader::verify();
  ClassLoader::copy_package_info_buckets(&md_top, md_end);
  ClassLoader::verify();

  SymbolTable::copy_table(&md_top, md_end);
  SystemDictionary::copy_table(&md_top, md_end);
  ClassLoader::verify();
  ClassLoader::copy_package_info_table(&md_top, md_end);
  ClassLoader::verify();

  // Write the other data to the output array.
  WriteClosure wc(md_top, md_end);
  MetaspaceShared::serialize(&wc);
  md_top = wc.get_top();

  // Print shared spaces all the time
  const char* fmt = "%s space: " PTR_FORMAT " out of " PTR_FORMAT " words allocated at " PTR_FORMAT ".";
  Metaspace* ro_space = _loader_data->ro_metaspace();
  Metaspace* rw_space = _loader_data->rw_metaspace();
  tty->print_cr(fmt, "ro", ro_space->used_words(Metaspace::NonClassType),
                ro_space->capacity_words(Metaspace::NonClassType),
                ro_space->bottom());
  tty->print_cr(fmt, "rw", rw_space->used_words(Metaspace::NonClassType),
                rw_space->capacity_words(Metaspace::NonClassType),
                rw_space->bottom());
  tty->print_cr(fmt, "md", md_top - md_low, md_end - md_low, md_low);
  tty->print_cr(fmt, "mc", mc_top - mc_low, mc_end - mc_low, mc_low);

  // Update the vtable pointers in all of the Klass objects in the
  // heap. They should point to the newly generated vtables.
  patch_klass_vtables(vtbl_list, vtable);

  // dunno what this is for.
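  // (Presumably: the vtbl_list entries are process-specific vtable addresses,
  // so zero them out before the archive is written; the saved copy is put
  // back by the memmove at the end of this function.)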
  char* saved_vtbl = (char*)os::malloc(vtbl_list_size * sizeof(void*), mtClass);
  memmove(saved_vtbl, vtbl_list, vtbl_list_size * sizeof(void*));
  memset(vtbl_list, 0, vtbl_list_size * sizeof(void*));

  // Create and write the archive file that maps the shared spaces.

  FileMapInfo* mapinfo = new FileMapInfo();
  mapinfo->populate_header(MetaspaceShared::max_alignment());

  // Pass 1 - update file offsets in header.
  mapinfo->write_header();
  mapinfo->write_space(MetaspaceShared::ro, _loader_data->ro_metaspace(), true);
  mapinfo->write_space(MetaspaceShared::rw, _loader_data->rw_metaspace(), false);
  mapinfo->write_region(MetaspaceShared::md, _md_vs.low(),
                        pointer_delta(md_top, _md_vs.low(), sizeof(char)),
                        SharedMiscDataSize,
                        false, false);
  mapinfo->write_region(MetaspaceShared::mc, _mc_vs.low(),
                        pointer_delta(mc_top, _mc_vs.low(), sizeof(char)),
                        SharedMiscCodeSize,
                        true, true);

  // Pass 2 - write data.
  mapinfo->open_for_write();
  mapinfo->write_header();
  mapinfo->write_space(MetaspaceShared::ro, _loader_data->ro_metaspace(), true);
  mapinfo->write_space(MetaspaceShared::rw, _loader_data->rw_metaspace(), false);
  mapinfo->write_region(MetaspaceShared::md, _md_vs.low(),
                        pointer_delta(md_top, _md_vs.low(), sizeof(char)),
                        SharedMiscDataSize,
                        false, false);
  mapinfo->write_region(MetaspaceShared::mc, _mc_vs.low(),
                        pointer_delta(mc_top, _mc_vs.low(), sizeof(char)),
                        SharedMiscCodeSize,
                        true, true);
  mapinfo->close();

  memmove(vtbl_list, saved_vtbl, vtbl_list_size * sizeof(void*));
}

static void link_shared_classes(Klass* obj, TRAPS) {
  Klass* k = Klass::cast(obj);
  if (k->oop_is_instance()) {
    InstanceKlass* ik = (InstanceKlass*) k;
    // Link the class to cause the bytecodes to be rewritten and the
    // cpcache to be created.
    if (ik->init_state() < InstanceKlass::linked) {
      ik->link_class(THREAD);
      guarantee(!HAS_PENDING_EXCEPTION, "exception in class rewriting");
    }
  }
}


// Support for a simple checksum of the contents of the class list
// file to prevent trivial tampering. The algorithm matches that in
// the MakeClassList program used by the J2SE build process.
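//
// The class list has one class name per line; the checksum of all names is
// carried on the final line as a '#' comment holding sixteen hex digits
// (high then low 32 bits), matching the "# %8x%8x" parse below.  For example
// (illustrative only):
//   java/lang/Object
//   java/lang/String
//   # 0123456789abcdef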
#define JSUM_SEED ((jlong)CONST64(0xcafebabebabecafe))
static jlong
jsum(jlong start, const char *buf, const int len)
{
    jlong h = start;
    char *p = (char *)buf, *e = p + len;
    while (p < e) {
        char c = *p++;
        if (c <= ' ') {
            /* Skip spaces and control characters */
            continue;
        }
        h = 31 * h + c;
    }
    return h;
}

// Preload classes from a list, populate the shared spaces and dump to a
// file.
void MetaspaceShared::preload_and_dump(TRAPS) {
  TraceTime timer("Dump Shared Spaces", TraceStartupTime);
  ResourceMark rm;

  // Lock out GC - is it necessary? I don't think we care.
  No_GC_Verifier no_gc;

  // Preload classes to be shared.
  // Should use some os:: method rather than fopen() here. aB.
  // Construct the path to the class list (in jre/lib)
  // Walk up two directories from the location of the VM and
  // optionally tack on "lib" (depending on platform)
  char class_list_path[JVM_MAXPATHLEN];
  os::jvm_path(class_list_path, sizeof(class_list_path));
  for (int i = 0; i < 3; i++) {
    char *end = strrchr(class_list_path, *os::file_separator());
    if (end != NULL) *end = '\0';
  }
  int class_list_path_len = (int)strlen(class_list_path);
  if (class_list_path_len >= 3) {
    if (strcmp(class_list_path + class_list_path_len - 3, "lib") != 0) {
      strcat(class_list_path, os::file_separator());
      strcat(class_list_path, "lib");
    }
  }
  strcat(class_list_path, os::file_separator());
  strcat(class_list_path, "classlist");

  FILE* file = fopen(class_list_path, "r");
  if (file != NULL) {
    jlong computed_jsum  = JSUM_SEED;
    jlong file_jsum      = 0;

    char class_name[256];
    int class_count = 0;
    GrowableArray<Klass*>* class_promote_order = new GrowableArray<Klass*>();

    // sun.io.Converters
    static const char obj_array_sig[] = "[[Ljava/lang/Object;";
    SymbolTable::new_permanent_symbol(obj_array_sig, THREAD);

    // java.util.HashMap
    static const char map_entry_array_sig[] = "[Ljava/util/Map$Entry;";
    SymbolTable::new_permanent_symbol(map_entry_array_sig, THREAD);

    tty->print("Loading classes to share ... ");
    while ((fgets(class_name, sizeof class_name, file)) != NULL) {
      if (*class_name == '#') {
        jint fsh, fsl;
        if (sscanf(class_name, "# %8x%8x\n", &fsh, &fsl) == 2) {
          file_jsum = ((jlong)(fsh) << 32) | (fsl & 0xffffffff);
        }

        continue;
      }
      // Remove trailing newline
      size_t name_len = strlen(class_name);
      class_name[name_len-1] = '\0';

      computed_jsum = jsum(computed_jsum, class_name, (const int)name_len - 1);

      // Got a class name - load it.
      TempNewSymbol class_name_symbol = SymbolTable::new_permanent_symbol(class_name, THREAD);
      guarantee(!HAS_PENDING_EXCEPTION, "Exception creating a symbol.");
      Klass* klass = SystemDictionary::resolve_or_null(class_name_symbol,
                                                         THREAD);
      guarantee(!HAS_PENDING_EXCEPTION, "Exception resolving a class.");
      if (klass != NULL) {
        if (PrintSharedSpaces && Verbose && WizardMode) {
          tty->print_cr("Shared spaces preloaded: %s", class_name);
        }


        InstanceKlass* ik = InstanceKlass::cast(klass);

        // Should be class load order as per -XX:+TraceClassLoadingPreorder
        class_promote_order->append(ik);

        // Link the class to cause the bytecodes to be rewritten and the
        // cpcache to be created. The linking is done as soon as classes
        // are loaded in order that the related data structures (klass and
        // cpCache) are located together.

        if (ik->init_state() < InstanceKlass::linked) {
          ik->link_class(THREAD);
          guarantee(!(HAS_PENDING_EXCEPTION), "exception in class rewriting");
        }

        // TODO: Resolve klasses in constant pool
        ik->constants()->resolve_class_constants(THREAD);

        class_count++;
      } else {
        if (PrintSharedSpaces && Verbose && WizardMode) {
          tty->cr();
          tty->print_cr(" Preload failed: %s", class_name);
        }
      }
      file_jsum = 0; // Checksum must be on last line of file
    }
    if (computed_jsum != file_jsum) {
      tty->cr();
      tty->print_cr("Preload failed: checksum of class list was incorrect.");
      exit(1);
    }

    tty->print_cr("done. ");

    if (PrintSharedSpaces) {
      tty->print_cr("Shared spaces: preloaded %d classes", class_count);
    }
    // Rewrite and link classes.
    tty->print("Rewriting and linking classes ... ");

    // Link any classes which got missed.  (It's not quite clear why
    // they got missed.)  This iteration would be unsafe if we weren't
    // single-threaded at this point; however we can't do it on the VM
    // thread because it requires object allocation.
    SystemDictionary::classes_do(link_shared_classes, CATCH);
    tty->print_cr("done. ");

    // Create and dump the shared spaces.   Everything so far is loaded
    // with the null class loader.
    ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
    VM_PopulateDumpSharedSpace op(loader_data, class_promote_order);
    VMThread::execute(&op);

  } else {
    char errmsg[JVM_MAXPATHLEN];
    os::lasterror(errmsg, JVM_MAXPATHLEN);
    tty->print_cr("Loading classlist failed: %s", errmsg);
    exit(1);
  }

  // Since various initialization steps have been undone by this process,
  // it is not reasonable to continue running a java process.
  exit(0);
}


// Closure for serializing initialization data in from a data area
// (ptr_array) read from the shared file.

class ReadClosure : public SerializeClosure {
private:
  intptr_t** _ptr_array;

  inline intptr_t nextPtr() {
    return *(*_ptr_array)++;
  }

public:
  ReadClosure(intptr_t** ptr_array) { _ptr_array = ptr_array; }

  void do_ptr(void** p) {
    assert(*p == NULL, "initializing a previously initialized pointer.");
    intptr_t obj = nextPtr();
    assert((intptr_t)obj >= 0 || (intptr_t)obj < -100,
           "hit tag while initializing ptrs.");
    *p = (void*)obj;
  }

  void do_tag(int tag) {
    int old_tag;
    old_tag = (int)(intptr_t)nextPtr();
    // do_int(&old_tag);
    assert(tag == old_tag, "old tag doesn't match");
    FileMapInfo::assert_mark(tag == old_tag);
  }

  void do_region(u_char* start, size_t size) {
    assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
    assert(size % sizeof(intptr_t) == 0, "bad size");
    do_tag((int)size);
    while (size > 0) {
      *(intptr_t*)start = nextPtr();
      start += sizeof(intptr_t);
      size -= sizeof(intptr_t);
    }
  }

  bool reading() const { return true; }
};


// Save bounds of shared spaces mapped in.
static char* _ro_base = NULL;
static char* _rw_base = NULL;
static char* _md_base = NULL;
static char* _mc_base = NULL;

// Return true if given address is in the mapped shared space.
bool MetaspaceShared::is_in_shared_space(const void* p) {
  if (_ro_base == NULL || _rw_base == NULL) {
    return false;
  } else {
    return ((p > _ro_base && p < (_ro_base + SharedReadOnlySize)) ||
            (p > _rw_base && p < (_rw_base + SharedReadWriteSize)));
  }
}
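
// Note that only the ro and rw regions are tested above; addresses in the md
// and mc regions are not treated as part of the shared metadata space here.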

void MetaspaceShared::print_shared_spaces() {
  gclog_or_tty->print_cr("Shared Spaces:");
  gclog_or_tty->print("  read-only " INTPTR_FORMAT "-" INTPTR_FORMAT,
    _ro_base, _ro_base + SharedReadOnlySize);
  gclog_or_tty->print("  read-write " INTPTR_FORMAT "-" INTPTR_FORMAT,
    _rw_base, _rw_base + SharedReadWriteSize);
  gclog_or_tty->cr();
  gclog_or_tty->print("  misc-data " INTPTR_FORMAT "-" INTPTR_FORMAT,
    _md_base, _md_base + SharedMiscDataSize);
  gclog_or_tty->print("  misc-code " INTPTR_FORMAT "-" INTPTR_FORMAT,
    _mc_base, _mc_base + SharedMiscCodeSize);
  gclog_or_tty->cr();
}


// Map shared spaces at requested addresses and return true if it succeeded.
// Need to keep the bounds of the ro and rw space for the Metaspace::contains
// call, or is_in_shared_space.
bool MetaspaceShared::map_shared_spaces(FileMapInfo* mapinfo) {
  size_t image_alignment = mapinfo->alignment();

  // Map in the shared memory and then map the regions on top of it
  ReservedSpace shared_rs = mapinfo->reserve_shared_memory();
  if (!shared_rs.is_reserved()) return false;

  // Split reserved memory into pieces (Windows needs this)
  ReservedSpace ro_rs   = shared_rs.first_part(SharedReadOnlySize);
  ReservedSpace tmp_rs1 = shared_rs.last_part(SharedReadOnlySize);
  ReservedSpace rw_rs   = tmp_rs1.first_part(SharedReadWriteSize);
  ReservedSpace tmp_rs2 = tmp_rs1.last_part(SharedReadWriteSize);
  ReservedSpace md_rs   = tmp_rs2.first_part(SharedMiscDataSize);
  ReservedSpace mc_rs   = tmp_rs2.last_part(SharedMiscDataSize);
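  // The split order (ro, rw, md, mc) mirrors the layout written at dump time
  // by VM_PopulateDumpSharedSpace.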

  // Map each shared region
  if ((_ro_base = mapinfo->map_region(ro)) != NULL &&
      (_rw_base = mapinfo->map_region(rw)) != NULL &&
      (_md_base = mapinfo->map_region(md)) != NULL &&
      (_mc_base = mapinfo->map_region(mc)) != NULL &&
      (image_alignment == (size_t)max_alignment())) {
    // Success (no need to do anything)
    return true;
  } else {
    // If there was a failure in mapping any of the spaces, unmap the ones
    // that succeeded
    if (_ro_base != NULL) mapinfo->unmap_region(ro);
    if (_rw_base != NULL) mapinfo->unmap_region(rw);
    if (_md_base != NULL) mapinfo->unmap_region(md);
    if (_mc_base != NULL) mapinfo->unmap_region(mc);
    // Release the entire mapped region
    shared_rs.release();
    // If -Xshare:on is specified, print out the error message and exit VM,
    // otherwise, set UseSharedSpaces to false and continue.
    if (RequireSharedSpaces) {
      vm_exit_during_initialization("Unable to use shared archive.", NULL);
    } else {
      FLAG_SET_DEFAULT(UseSharedSpaces, false);
    }
    return false;
  }
}

// Read the miscellaneous data from the shared file, and
// serialize it out to its various destinations.

void MetaspaceShared::initialize_shared_spaces() {
  FileMapInfo *mapinfo = FileMapInfo::current_info();

  char* buffer = mapinfo->region_base(md);

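  // The md region is walked here in the same order it was written at dump
  // time by VM_PopulateDumpSharedSpace::doit():
  //   1. the self-patching vtbl_list, followed by the dummy vtable entries
  //   2. the symbol table, shared dictionary and package info bucket arrays
  //   3. the symbol table, shared dictionary and package info entries
  //      (plus the package info char[] arrays), which are skipped over
  //   4. the serialized miscellaneous data, read back via ReadClosure
  // Each variable-sized piece is preceded by its length in bytes (the bucket
  // arrays also carry an entry count).
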
  // Skip over (reserve space for) a list of addresses of C++ vtables
  // for Klass objects.  They get filled in later.

  void** vtbl_list = (void**)buffer;
  buffer += MetaspaceShared::vtbl_list_size * sizeof(void*);
  Universe::init_self_patching_vtbl_list(vtbl_list, vtbl_list_size);

  // Skip over (reserve space for) dummy C++ vtables for Klass objects.
  // They are used as is.

  intptr_t vtable_size = *(intptr_t*)buffer;
  buffer += sizeof(intptr_t);
  buffer += vtable_size;

  // Create the symbol table using the bucket array at this spot in the
  // misc data space.  Since the symbol table is often modified, this
  // region (of mapped pages) will be copy-on-write.

  int symbolTableLen = *(intptr_t*)buffer;
  buffer += sizeof(intptr_t);
  int number_of_entries = *(intptr_t*)buffer;
  buffer += sizeof(intptr_t);
  SymbolTable::create_table((HashtableBucket<mtSymbol>*)buffer, symbolTableLen,
                            number_of_entries);
  buffer += symbolTableLen;

  // Create the shared dictionary using the bucket array at this spot in
  // the misc data space.  Since the shared dictionary table is never
  // modified, this region (of mapped pages) will be (effectively, if
  // not explicitly) read-only.

  int sharedDictionaryLen = *(intptr_t*)buffer;
  buffer += sizeof(intptr_t);
  number_of_entries = *(intptr_t*)buffer;
  buffer += sizeof(intptr_t);
  SystemDictionary::set_shared_dictionary((HashtableBucket<mtClass>*)buffer,
                                          sharedDictionaryLen,
                                          number_of_entries);
  buffer += sharedDictionaryLen;

  // Create the package info table using the bucket array at this spot in
  // the misc data space.  Since the package info table is never
  // modified, this region (of mapped pages) will be (effectively, if
  // not explicitly) read-only.

  int pkgInfoLen = *(intptr_t*)buffer;
  buffer += sizeof(intptr_t);
  number_of_entries = *(intptr_t*)buffer;
  buffer += sizeof(intptr_t);
  ClassLoader::create_package_info_table((HashtableBucket<mtClass>*)buffer, pkgInfoLen,
                                         number_of_entries);
  buffer += pkgInfoLen;
  ClassLoader::verify();

  // The following data in the shared misc data region are the linked
  // list elements (HashtableEntry objects) for the symbol table, string
  // table, and shared dictionary.  The heap objects referred to by the
  // symbol table, string table, and shared dictionary are permanent and
  // unmovable.  Since new entries added to the string and symbol tables
  // are always added at the beginning of the linked lists, THESE LINKED
  // LIST ELEMENTS ARE READ-ONLY.

  int len = *(intptr_t*)buffer; // skip over symbol table entries
  buffer += sizeof(intptr_t);
  buffer += len;

  len = *(intptr_t*)buffer;     // skip over shared dictionary entries
  buffer += sizeof(intptr_t);
  buffer += len;

  len = *(intptr_t*)buffer;     // skip over package info table entries
  buffer += sizeof(intptr_t);
  buffer += len;

  len = *(intptr_t*)buffer;     // skip over package info table char[] arrays.
  buffer += sizeof(intptr_t);
  buffer += len;

  intptr_t* array = (intptr_t*)buffer;
  ReadClosure rc(&array);
  serialize(&rc);

  // Close the mapinfo file
  mapinfo->close();
}

// JVM/TI RedefineClasses() support:
bool MetaspaceShared::remap_shared_readonly_as_readwrite() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  if (UseSharedSpaces) {
    // remap the shared readonly space to shared readwrite, private
    FileMapInfo* mapinfo = FileMapInfo::current_info();
    if (!mapinfo->remap_shared_readonly_as_readwrite()) {
      return false;
    }
  }
  return true;
}
845