universe.cpp revision 4454:cc32ccaaf47f
/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/dependencies.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genRemSet.hpp"
#include "memory/generation.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/oopFactory.hpp"
#include "memory/space.hpp"
#include "memory/universe.hpp"
#include "memory/universe.inline.hpp"
#include "oops/constantPool.hpp"
#include "oops/instanceClassLoaderKlass.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayKlass.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "runtime/aprofiler.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/timer.hpp"
#include "runtime/vm_operations.hpp"
#include "services/memoryService.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"
#include "utilities/hashtable.inline.hpp"
#include "utilities/macros.hpp"
#include "utilities/preserveException.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#endif // INCLUDE_ALL_GCS

// Known objects
Klass* Universe::_boolArrayKlassObj                 = NULL;
Klass* Universe::_byteArrayKlassObj                 = NULL;
Klass* Universe::_charArrayKlassObj                 = NULL;
Klass* Universe::_intArrayKlassObj                  = NULL;
Klass* Universe::_shortArrayKlassObj                = NULL;
Klass* Universe::_longArrayKlassObj                 = NULL;
Klass* Universe::_singleArrayKlassObj               = NULL;
Klass* Universe::_doubleArrayKlassObj               = NULL;
Klass* Universe::_typeArrayKlassObjs[T_VOID+1]      = { NULL /*, NULL...*/ };
Klass* Universe::_objectArrayKlassObj               = NULL;
oop Universe::_int_mirror                             = NULL;
oop Universe::_float_mirror                           = NULL;
oop Universe::_double_mirror                          = NULL;
oop Universe::_byte_mirror                            = NULL;
oop Universe::_bool_mirror                            = NULL;
oop Universe::_char_mirror                            = NULL;
oop Universe::_long_mirror                            = NULL;
oop Universe::_short_mirror                           = NULL;
oop Universe::_void_mirror                            = NULL;
oop Universe::_mirrors[T_VOID+1]                      = { NULL /*, NULL...*/ };
oop Universe::_main_thread_group                      = NULL;
oop Universe::_system_thread_group                    = NULL;
objArrayOop Universe::_the_empty_class_klass_array    = NULL;
Array<Klass*>* Universe::_the_array_interfaces_array  = NULL;
oop Universe::_the_null_string                        = NULL;
oop Universe::_the_min_jint_string                    = NULL;
LatestMethodOopCache* Universe::_finalizer_register_cache = NULL;
LatestMethodOopCache* Universe::_loader_addClass_cache    = NULL;
ActiveMethodOopsCache* Universe::_reflect_invoke_cache    = NULL;
oop Universe::_out_of_memory_error_java_heap          = NULL;
oop Universe::_out_of_memory_error_perm_gen           = NULL;
oop Universe::_out_of_memory_error_array_size         = NULL;
oop Universe::_out_of_memory_error_gc_overhead_limit  = NULL;
objArrayOop Universe::_preallocated_out_of_memory_error_array = NULL;
volatile jint Universe::_preallocated_out_of_memory_error_avail_count = 0;
bool Universe::_verify_in_progress                    = false;
oop Universe::_null_ptr_exception_instance            = NULL;
oop Universe::_arithmetic_exception_instance          = NULL;
oop Universe::_virtual_machine_error_instance         = NULL;
oop Universe::_vm_exception                           = NULL;
Array<int>* Universe::_the_empty_int_array            = NULL;
Array<u2>* Universe::_the_empty_short_array           = NULL;
Array<Klass*>* Universe::_the_empty_klass_array       = NULL;
Array<Method*>* Universe::_the_empty_method_array     = NULL;

// These variables are guarded by FullGCALot_lock.
debug_only(objArrayOop Universe::_fullgc_alot_dummy_array = NULL;)
debug_only(int Universe::_fullgc_alot_dummy_next      = 0;)

// Heap
int             Universe::_verify_count = 0;

int             Universe::_base_vtable_size = 0;
bool            Universe::_bootstrapping = false;
bool            Universe::_fully_initialized = false;

size_t          Universe::_heap_capacity_at_last_gc;
size_t          Universe::_heap_used_at_last_gc = 0;

CollectedHeap*  Universe::_collectedHeap = NULL;

NarrowPtrStruct Universe::_narrow_oop = { NULL, 0, true };
NarrowPtrStruct Universe::_narrow_klass = { NULL, 0, true };
address Universe::_narrow_ptrs_base;

size_t          Universe::_class_metaspace_size;

void Universe::basic_type_classes_do(void f(Klass*)) {
  f(boolArrayKlassObj());
  f(byteArrayKlassObj());
  f(charArrayKlassObj());
  f(intArrayKlassObj());
  f(shortArrayKlassObj());
  f(longArrayKlassObj());
  f(singleArrayKlassObj());
  f(doubleArrayKlassObj());
}

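// oops_do enumerates the strong roots held directly by the Universe (the
// basic type mirrors, preallocated exceptions, thread groups, and so on).
// Garbage collectors invoke it during root scanning so that these oops are
// marked and, if the objects move, the static fields above are updated.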
void Universe::oops_do(OopClosure* f, bool do_all) {

  f->do_oop((oop*) &_int_mirror);
  f->do_oop((oop*) &_float_mirror);
  f->do_oop((oop*) &_double_mirror);
  f->do_oop((oop*) &_byte_mirror);
  f->do_oop((oop*) &_bool_mirror);
  f->do_oop((oop*) &_char_mirror);
  f->do_oop((oop*) &_long_mirror);
  f->do_oop((oop*) &_short_mirror);
  f->do_oop((oop*) &_void_mirror);

  for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
    f->do_oop((oop*) &_mirrors[i]);
  }
  assert(_mirrors[0] == NULL && _mirrors[T_BOOLEAN - 1] == NULL, "checking");

  f->do_oop((oop*)&_the_empty_class_klass_array);
  f->do_oop((oop*)&_the_null_string);
  f->do_oop((oop*)&_the_min_jint_string);
  f->do_oop((oop*)&_out_of_memory_error_java_heap);
  f->do_oop((oop*)&_out_of_memory_error_perm_gen);
  f->do_oop((oop*)&_out_of_memory_error_array_size);
  f->do_oop((oop*)&_out_of_memory_error_gc_overhead_limit);
  f->do_oop((oop*)&_preallocated_out_of_memory_error_array);
  f->do_oop((oop*)&_null_ptr_exception_instance);
  f->do_oop((oop*)&_arithmetic_exception_instance);
  f->do_oop((oop*)&_virtual_machine_error_instance);
  f->do_oop((oop*)&_main_thread_group);
  f->do_oop((oop*)&_system_thread_group);
  f->do_oop((oop*)&_vm_exception);
  debug_only(f->do_oop((oop*)&_fullgc_alot_dummy_array);)
}

// Serialize metadata in and out of the CDS archive, not oops.
void Universe::serialize(SerializeClosure* f, bool do_all) {

  f->do_ptr((void**)&_boolArrayKlassObj);
  f->do_ptr((void**)&_byteArrayKlassObj);
  f->do_ptr((void**)&_charArrayKlassObj);
  f->do_ptr((void**)&_intArrayKlassObj);
  f->do_ptr((void**)&_shortArrayKlassObj);
  f->do_ptr((void**)&_longArrayKlassObj);
  f->do_ptr((void**)&_singleArrayKlassObj);
  f->do_ptr((void**)&_doubleArrayKlassObj);
  f->do_ptr((void**)&_objectArrayKlassObj);

  {
    for (int i = 0; i < T_VOID+1; i++) {
      if (_typeArrayKlassObjs[i] != NULL) {
        assert(i >= T_BOOLEAN, "checking");
        f->do_ptr((void**)&_typeArrayKlassObjs[i]);
      } else if (do_all) {
        f->do_ptr((void**)&_typeArrayKlassObjs[i]);
      }
    }
  }

  f->do_ptr((void**)&_the_array_interfaces_array);
  f->do_ptr((void**)&_the_empty_int_array);
  f->do_ptr((void**)&_the_empty_short_array);
  f->do_ptr((void**)&_the_empty_method_array);
  f->do_ptr((void**)&_the_empty_klass_array);
  _finalizer_register_cache->serialize(f);
  _loader_addClass_cache->serialize(f);
  _reflect_invoke_cache->serialize(f);
}

void Universe::check_alignment(uintx size, uintx alignment, const char* name) {
  if (size < alignment || size % alignment != 0) {
    ResourceMark rm;
    stringStream st;
    st.print("Size of %s (" UINTX_FORMAT " bytes) must be aligned to " UINTX_FORMAT " bytes", name, size, alignment);
    char* error = st.as_string();
    vm_exit_during_initialization(error);
  }
}
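
// For illustration (hypothetical arguments): check_alignment(10*M, 4*M, "gen")
// would call vm_exit_during_initialization because 10M is not a multiple of
// 4M, whereas check_alignment(8*M, 4*M, "gen") would return normally.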

void initialize_basic_type_klass(Klass* k, TRAPS) {
  Klass* ok = SystemDictionary::Object_klass();
  if (UseSharedSpaces) {
    assert(k->super() == ok, "u3");
    k->restore_unshareable_info(CHECK);
  } else {
    k->initialize_supers(ok, CHECK);
  }
  k->append_to_sibling_list();
}

void Universe::genesis(TRAPS) {
  ResourceMark rm;

  { FlagSetting fs(_bootstrapping, true);

    { MutexLocker mc(Compile_lock);

      // determine base vtable size; without that we cannot create the array klasses
      compute_base_vtable_size();

      if (!UseSharedSpaces) {
        _boolArrayKlassObj      = TypeArrayKlass::create_klass(T_BOOLEAN, sizeof(jboolean), CHECK);
        _charArrayKlassObj      = TypeArrayKlass::create_klass(T_CHAR,    sizeof(jchar),    CHECK);
        _singleArrayKlassObj    = TypeArrayKlass::create_klass(T_FLOAT,   sizeof(jfloat),   CHECK);
        _doubleArrayKlassObj    = TypeArrayKlass::create_klass(T_DOUBLE,  sizeof(jdouble),  CHECK);
        _byteArrayKlassObj      = TypeArrayKlass::create_klass(T_BYTE,    sizeof(jbyte),    CHECK);
        _shortArrayKlassObj     = TypeArrayKlass::create_klass(T_SHORT,   sizeof(jshort),   CHECK);
        _intArrayKlassObj       = TypeArrayKlass::create_klass(T_INT,     sizeof(jint),     CHECK);
        _longArrayKlassObj      = TypeArrayKlass::create_klass(T_LONG,    sizeof(jlong),    CHECK);

        _typeArrayKlassObjs[T_BOOLEAN] = _boolArrayKlassObj;
        _typeArrayKlassObjs[T_CHAR]    = _charArrayKlassObj;
        _typeArrayKlassObjs[T_FLOAT]   = _singleArrayKlassObj;
        _typeArrayKlassObjs[T_DOUBLE]  = _doubleArrayKlassObj;
        _typeArrayKlassObjs[T_BYTE]    = _byteArrayKlassObj;
        _typeArrayKlassObjs[T_SHORT]   = _shortArrayKlassObj;
        _typeArrayKlassObjs[T_INT]     = _intArrayKlassObj;
        _typeArrayKlassObjs[T_LONG]    = _longArrayKlassObj;

        ClassLoaderData* null_cld = ClassLoaderData::the_null_class_loader_data();

        _the_array_interfaces_array = MetadataFactory::new_array<Klass*>(null_cld, 2, NULL, CHECK);
        _the_empty_int_array        = MetadataFactory::new_array<int>(null_cld, 0, CHECK);
        _the_empty_short_array      = MetadataFactory::new_array<u2>(null_cld, 0, CHECK);
        _the_empty_method_array     = MetadataFactory::new_array<Method*>(null_cld, 0, CHECK);
        _the_empty_klass_array      = MetadataFactory::new_array<Klass*>(null_cld, 0, CHECK);
      }
    }

    vmSymbols::initialize(CHECK);

    SystemDictionary::initialize(CHECK);

    Klass* ok = SystemDictionary::Object_klass();

    _the_null_string            = StringTable::intern("null", CHECK);
    _the_min_jint_string        = StringTable::intern("-2147483648", CHECK);

    if (UseSharedSpaces) {
      // Verify shared interfaces array.
      assert(_the_array_interfaces_array->at(0) ==
             SystemDictionary::Cloneable_klass(), "u3");
      assert(_the_array_interfaces_array->at(1) ==
             SystemDictionary::Serializable_klass(), "u3");
    } else {
      // Set up shared interfaces array.  (Do this before supers are set up.)
      _the_array_interfaces_array->at_put(0, SystemDictionary::Cloneable_klass());
      _the_array_interfaces_array->at_put(1, SystemDictionary::Serializable_klass());
    }

    initialize_basic_type_klass(boolArrayKlassObj(), CHECK);
    initialize_basic_type_klass(charArrayKlassObj(), CHECK);
    initialize_basic_type_klass(singleArrayKlassObj(), CHECK);
    initialize_basic_type_klass(doubleArrayKlassObj(), CHECK);
    initialize_basic_type_klass(byteArrayKlassObj(), CHECK);
    initialize_basic_type_klass(shortArrayKlassObj(), CHECK);
    initialize_basic_type_klass(intArrayKlassObj(), CHECK);
    initialize_basic_type_klass(longArrayKlassObj(), CHECK);
  } // end of core bootstrapping

  // Maybe this could be lifted up now that object arrays can be initialized
  // during bootstrapping.

  // OLD
  // Initialize _objectArrayKlass after core bootstrapping to make
  // sure the super class is set up properly for _objectArrayKlass.
  // ---
  // NEW
  // Since some of the old system object arrays have been converted to
  // ordinary object arrays, _objectArrayKlass will be loaded when
  // SystemDictionary::initialize(CHECK) is run. See the extra check
  // for Object_klass_loaded in objArrayKlassKlass::allocate_objArray_klass_impl.
  _objectArrayKlassObj = InstanceKlass::
    cast(SystemDictionary::Object_klass())->array_klass(1, CHECK);
  // OLD
  // Add the class to the class hierarchy manually to make sure that
  // its vtable is initialized after core bootstrapping is completed.
  // ---
  // NEW
  // It has already been initialized.
  _objectArrayKlassObj->append_to_sibling_list();

  // Compute the is_jdk version flags.
  // Only 1.3 or later has the java.lang.Shutdown class.
  // Only 1.4 or later has the java.lang.CharSequence interface.
  // Only 1.5 or later has the java.lang.management.MemoryUsage class.
  if (JDK_Version::is_partially_initialized()) {
    uint8_t jdk_version;
    Klass* k = SystemDictionary::resolve_or_null(
        vmSymbols::java_lang_management_MemoryUsage(), THREAD);
    CLEAR_PENDING_EXCEPTION; // ignore exceptions
    if (k == NULL) {
      k = SystemDictionary::resolve_or_null(
          vmSymbols::java_lang_CharSequence(), THREAD);
      CLEAR_PENDING_EXCEPTION; // ignore exceptions
      if (k == NULL) {
        k = SystemDictionary::resolve_or_null(
            vmSymbols::java_lang_Shutdown(), THREAD);
        CLEAR_PENDING_EXCEPTION; // ignore exceptions
        if (k == NULL) {
          jdk_version = 2;
        } else {
          jdk_version = 3;
        }
      } else {
        jdk_version = 4;
      }
    } else {
      jdk_version = 5;
    }
    JDK_Version::fully_initialize(jdk_version);
  }

  #ifdef ASSERT
  if (FullGCALot) {
    // Allocate an array of dummy objects.
    // We'd like these to be at the bottom of the old generation,
    // so that when we free one and then collect,
    // (almost) the whole heap moves
    // and we find out if we actually update all the oops correctly.
    // But we can't allocate directly in the old generation,
    // so we allocate wherever, and hope that the first collection
    // moves these objects to the bottom of the old generation.
    int size;
    if (UseConcMarkSweepGC) {
      warning("Using +FullGCALot with concurrent mark sweep gc "
              "will not force all objects to relocate");
      size = FullGCALotDummies;
    } else {
      size = FullGCALotDummies * 2;
    }
    objArrayOop    naked_array = oopFactory::new_objArray(SystemDictionary::Object_klass(), size, CHECK);
    objArrayHandle dummy_array(THREAD, naked_array);
    int i = 0;
    while (i < size) {
      // Allocate dummy in old generation
      oop dummy = InstanceKlass::cast(SystemDictionary::Object_klass())->allocate_instance(CHECK);
      dummy_array->obj_at_put(i++, dummy);
    }
    {
      // Only modify the global variable inside the mutex.
      // If we had a race to here, the other dummy_array instances
      // and their elements just get dropped on the floor, which is fine.
      MutexLocker ml(FullGCALot_lock);
      if (_fullgc_alot_dummy_array == NULL) {
        _fullgc_alot_dummy_array = dummy_array();
      }
    }
    assert(i == _fullgc_alot_dummy_array->length(), "just checking");
  }
  #endif

  // Initialize the dependency array for the null class loader.
  ClassLoaderData::the_null_class_loader_data()->init_dependencies(CHECK);
}

// CDS support for patching vtables in metadata in the shared archive.
// All types inherited from Metadata have vtables, but not types inherited
// from MetaspaceObj, because the latter does not have virtual functions.
// If the metadata type has a vtable, it cannot be shared in the read-only
// section of the CDS archive, because the vtable pointer is patched.
static inline void add_vtable(void** list, int* n, void* o, int count) {
  guarantee((*n) < count, "vtable list too small");
  void* vtable = dereference_vptr(o);
  assert(*(void**)(vtable) != NULL, "invalid vtable");
  list[(*n)++] = vtable;
}
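
// Note: dereference_vptr relies on the common C++ ABI layout in which an
// object's first word is its vtable pointer. The vtables recorded here are
// later used to patch the vtable pointers of shared metadata objects when
// the CDS archive is mapped into a process where those vtables may live at
// different addresses.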

void Universe::init_self_patching_vtbl_list(void** list, int count) {
  int n = 0;
  { InstanceKlass o;            add_vtable(list, &n, &o, count); }
  { InstanceClassLoaderKlass o; add_vtable(list, &n, &o, count); }
  { InstanceMirrorKlass o;      add_vtable(list, &n, &o, count); }
  { InstanceRefKlass o;         add_vtable(list, &n, &o, count); }
  { TypeArrayKlass o;           add_vtable(list, &n, &o, count); }
  { ObjArrayKlass o;            add_vtable(list, &n, &o, count); }
  { Method o;                   add_vtable(list, &n, &o, count); }
  { ConstantPool o;             add_vtable(list, &n, &o, count); }
}

void Universe::initialize_basic_type_mirrors(TRAPS) {
  assert(_int_mirror == NULL, "basic type mirrors already initialized");
  _int_mirror     =
    java_lang_Class::create_basic_type_mirror("int",     T_INT,     CHECK);
  _float_mirror   =
    java_lang_Class::create_basic_type_mirror("float",   T_FLOAT,   CHECK);
  _double_mirror  =
    java_lang_Class::create_basic_type_mirror("double",  T_DOUBLE,  CHECK);
  _byte_mirror    =
    java_lang_Class::create_basic_type_mirror("byte",    T_BYTE,    CHECK);
  _bool_mirror    =
    java_lang_Class::create_basic_type_mirror("boolean", T_BOOLEAN, CHECK);
  _char_mirror    =
    java_lang_Class::create_basic_type_mirror("char",    T_CHAR,    CHECK);
  _long_mirror    =
    java_lang_Class::create_basic_type_mirror("long",    T_LONG,    CHECK);
  _short_mirror   =
    java_lang_Class::create_basic_type_mirror("short",   T_SHORT,   CHECK);
  _void_mirror    =
    java_lang_Class::create_basic_type_mirror("void",    T_VOID,    CHECK);

  _mirrors[T_INT]     = _int_mirror;
  _mirrors[T_FLOAT]   = _float_mirror;
  _mirrors[T_DOUBLE]  = _double_mirror;
  _mirrors[T_BYTE]    = _byte_mirror;
  _mirrors[T_BOOLEAN] = _bool_mirror;
  _mirrors[T_CHAR]    = _char_mirror;
  _mirrors[T_LONG]    = _long_mirror;
  _mirrors[T_SHORT]   = _short_mirror;
  _mirrors[T_VOID]    = _void_mirror;
  //_mirrors[T_OBJECT]  = InstanceKlass::cast(_object_klass)->java_mirror();
  //_mirrors[T_ARRAY]   = InstanceKlass::cast(_object_klass)->java_mirror();
}

void Universe::fixup_mirrors(TRAPS) {
  // Bootstrap problem: all classes get a mirror (java.lang.Class instance) assigned eagerly,
  // but we cannot do that for classes created before java.lang.Class is loaded. Here we simply
  // walk over permanent objects created so far (mostly classes) and fix up their mirrors. Note
  // that the number of objects allocated at this point is very small.
  assert(SystemDictionary::Class_klass_loaded(), "java.lang.Class should be loaded");
  HandleMark hm(THREAD);
  // Cache the start of the static fields
  InstanceMirrorKlass::init_offset_of_static_fields();

  GrowableArray <Klass*>* list = java_lang_Class::fixup_mirror_list();
  int list_length = list->length();
  for (int i = 0; i < list_length; i++) {
    Klass* k = list->at(i);
    assert(k->is_klass(), "List should only hold classes");
    EXCEPTION_MARK;
    KlassHandle kh(THREAD, k);
    java_lang_Class::fixup_mirror(kh, CATCH);
  }
  delete java_lang_Class::fixup_mirror_list();
  java_lang_Class::set_fixup_mirror_list(NULL);
}

static bool has_run_finalizers_on_exit = false;

void Universe::run_finalizers_on_exit() {
  if (has_run_finalizers_on_exit) return;
  has_run_finalizers_on_exit = true;

  // Called on VM exit. This ought to be run in a separate thread.
  if (TraceReferenceGC) tty->print_cr("Callback to run finalizers on exit");
  {
    PRESERVE_EXCEPTION_MARK;
    KlassHandle finalizer_klass(THREAD, SystemDictionary::Finalizer_klass());
    JavaValue result(T_VOID);
    JavaCalls::call_static(
      &result,
      finalizer_klass,
      vmSymbols::run_finalizers_on_exit_name(),
      vmSymbols::void_method_signature(),
      THREAD
    );
    // Ignore any pending exceptions
    CLEAR_PENDING_EXCEPTION;
  }
}


// initialize_vtable could cause gc if
// 1) we specified true to initialize_vtable and
// 2) this ran after gc was enabled
// In case those ever change we use handles for oops
void Universe::reinitialize_vtable_of(KlassHandle k_h, TRAPS) {
  // init vtable of k and all subclasses
  Klass* ko = k_h();
  klassVtable* vt = ko->vtable();
  if (vt) vt->initialize_vtable(false, CHECK);
  if (ko->oop_is_instance()) {
    InstanceKlass* ik = (InstanceKlass*)ko;
    for (KlassHandle s_h(THREAD, ik->subklass());
         s_h() != NULL;
         s_h = KlassHandle(THREAD, s_h()->next_sibling())) {
      reinitialize_vtable_of(s_h, CHECK);
    }
  }
}


void initialize_itable_for_klass(Klass* k, TRAPS) {
  InstanceKlass::cast(k)->itable()->initialize_itable(false, CHECK);
}


void Universe::reinitialize_itables(TRAPS) {
  SystemDictionary::classes_do(initialize_itable_for_klass, CHECK);
}


bool Universe::on_page_boundary(void* addr) {
  return ((uintptr_t) addr) % os::vm_page_size() == 0;
}


bool Universe::should_fill_in_stack_trace(Handle throwable) {
  // Never attempt to fill in the stack trace of preallocated errors that do not have
  // a backtrace. These errors are kept alive forever and may be "re-used" when all
  // preallocated errors with a backtrace have been consumed. Also need to avoid
  // a potential loop which could happen if an out of memory error occurs when attempting
  // to allocate the backtrace.
  return ((throwable() != Universe::_out_of_memory_error_java_heap) &&
          (throwable() != Universe::_out_of_memory_error_perm_gen)  &&
          (throwable() != Universe::_out_of_memory_error_array_size) &&
          (throwable() != Universe::_out_of_memory_error_gc_overhead_limit));
}


oop Universe::gen_out_of_memory_error(oop default_err) {
  // Generate an out of memory error:
  // - if there is a preallocated error with backtrace available then return it with
  //   a filled-in stack trace.
  // - if there are no preallocated errors with backtrace available then return
  //   an error without backtrace.
  int next;
  if (_preallocated_out_of_memory_error_avail_count > 0) {
    next = (int)Atomic::add(-1, &_preallocated_out_of_memory_error_avail_count);
    assert(next < (int)PreallocatedOutOfMemoryErrorCount, "avail count is corrupt");
  } else {
    next = -1;
  }
  if (next < 0) {
    // all preallocated errors have been used.
    // return default
    return default_err;
  } else {
    // get the error object at the slot and set it to NULL so that the
    // array isn't keeping it alive anymore.
    oop exc = preallocated_out_of_memory_errors()->obj_at(next);
    assert(exc != NULL, "slot has been used already");
    preallocated_out_of_memory_errors()->obj_at_put(next, NULL);

    // use the message from the default error
    oop msg = java_lang_Throwable::message(default_err);
    assert(msg != NULL, "no message");
    java_lang_Throwable::set_message(exc, msg);

    // populate the stack trace and return it.
    java_lang_Throwable::fill_in_stack_trace_of_preallocated_backtrace(exc);
    return exc;
  }
}
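
// A walk-through of the counter logic above (counts hypothetical): with
// PreallocatedOutOfMemoryErrorCount == 4, successive Atomic::add(-1, ...)
// calls return 3, 2, 1, 0, each a unique slot index. If two threads race
// past the "> 0" check when only one slot remains, the loser's add returns
// -1, the "next < 0" branch fires, and it falls back to default_err, so no
// slot is ever handed out twice.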

static intptr_t non_oop_bits = 0;

void* Universe::non_oop_word() {
  // Neither the high bits nor the low bits of this value are allowed
  // to look like (respectively) the high or low bits of a real oop.
  //
  // High and low are CPU-specific notions, but low always includes
  // the low-order bit.  Since oops are always aligned at least mod 4,
  // setting the low-order bit will ensure that the low half of the
  // word will never look like that of a real oop.
  //
  // Using the OS-supplied non-memory-address word (usually 0 or -1)
  // will take care of the high bits, however many there are.

  if (non_oop_bits == 0) {
    non_oop_bits = (intptr_t)os::non_memory_address_word() | 1;
  }

  return (void*)non_oop_bits;
}
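
// For example, with os::non_memory_address_word() returning 0 or -1 (as the
// comment above says is usual), non_oop_word() yields 1 or -1 respectively;
// both are odd and so can never equal an oop aligned mod 4. Compiled code
// uses this word as a sentinel, such as the initial value of an inline
// cache, that is guaranteed never to compare equal to any real oop.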

jint universe_init() {
  assert(!Universe::_fully_initialized, "called after initialize_vtables");
  guarantee(1 << LogHeapWordSize == sizeof(HeapWord),
            "LogHeapWordSize is incorrect.");
  guarantee(sizeof(oop) >= sizeof(HeapWord), "HeapWord larger than oop?");
  guarantee(sizeof(oop) % sizeof(HeapWord) == 0,
            "oop size is not a multiple of HeapWord size");
  TraceTime timer("Genesis", TraceStartupTime);
  GC_locker::lock();  // do not allow gc during bootstrapping
  JavaClasses::compute_hard_coded_offsets();

  jint status = Universe::initialize_heap();
  if (status != JNI_OK) {
    return status;
  }

  // Create memory for metadata.  Must be after initializing heap for
  // DumpSharedSpaces.
  ClassLoaderData::init_null_class_loader_data();

  // We have a heap so create the Method* caches before
  // Metaspace::initialize_shared_spaces() tries to populate them.
  Universe::_finalizer_register_cache = new LatestMethodOopCache();
  Universe::_loader_addClass_cache    = new LatestMethodOopCache();
  Universe::_reflect_invoke_cache     = new ActiveMethodOopsCache();

  if (UseSharedSpaces) {
    // Read the data structures supporting the shared spaces (shared
    // system dictionary, symbol table, etc.).  After that, access to
    // the file (other than the mapped regions) is no longer needed, and
    // the file is closed. Closing the file does not affect the
    // currently mapped regions.
    MetaspaceShared::initialize_shared_spaces();
    StringTable::create_table();
  } else {
    SymbolTable::create_table();
    StringTable::create_table();
    ClassLoader::create_package_info_table();
  }

  return JNI_OK;
}

// Choose the heap base address and oop encoding mode
// when compressed oops are used:
// Unscaled  - Use 32-bit oops without encoding when
//     NarrowOopHeapBaseMin + heap_size < 4Gb
// ZeroBased - Use zero based compressed oops with encoding when
//     NarrowOopHeapBaseMin + heap_size < 32Gb
// HeapBased - Use compressed oops with heap base + encoding.

// 4Gb
static const uint64_t NarrowOopHeapMax = (uint64_t(max_juint) + 1);
// 32Gb
// OopEncodingHeapMax == NarrowOopHeapMax << LogMinObjAlignmentInBytes;
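
// To make the modes concrete, assuming the default 8-byte object alignment
// (LogMinObjAlignmentInBytes == 3), decoding a narrow oop is:
//   oop = (narrow_oop << shift) + base
// Unscaled:  base == 0, shift == 0; the 32-bit value is the address itself,
//            so the heap must end below 4Gb.
// ZeroBased: base == 0, shift == 3; addresses up to 32Gb become reachable.
// HeapBased: base != 0, shift == 3; works for any placement, at the cost of
//            an extra add on every decode.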

char* Universe::preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode) {
  size_t base = 0;
#ifdef _LP64
  if (UseCompressedOops) {
    assert(mode == UnscaledNarrowOop  ||
           mode == ZeroBasedNarrowOop ||
           mode == HeapBasedNarrowOop, "mode is invalid");
    const size_t total_size = heap_size + HeapBaseMinAddress;
    // Return specified base for the first request.
    if (!FLAG_IS_DEFAULT(HeapBaseMinAddress) && (mode == UnscaledNarrowOop)) {
      base = HeapBaseMinAddress;

    // If the total size and the metaspace size are small enough to allow
    // UnscaledNarrowOop then just use UnscaledNarrowOop.
    } else if ((total_size <= OopEncodingHeapMax) && (mode != HeapBasedNarrowOop) &&
        (!UseCompressedKlassPointers ||
          (((OopEncodingHeapMax - heap_size) + Universe::class_metaspace_size()) <= KlassEncodingMetaspaceMax))) {
      // We don't need to check the metaspace size here because it is always smaller
      // than total_size.
      if ((total_size <= NarrowOopHeapMax) && (mode == UnscaledNarrowOop) &&
          (Universe::narrow_oop_shift() == 0)) {
        // Use 32-bit oops without encoding and
        // place the heap's top on the 4Gb boundary
        base = (NarrowOopHeapMax - heap_size);
      } else {
        // Can't reserve with NarrowOopShift == 0
        Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
        if (mode == UnscaledNarrowOop ||
            (mode == ZeroBasedNarrowOop && total_size <= NarrowOopHeapMax)) {
          // Use zero based compressed oops with encoding and
          // place the heap's top on the 32Gb boundary in case
          // total_size > 4Gb or we failed to reserve below 4Gb.
          base = (OopEncodingHeapMax - heap_size);
        }
      }

    // See if ZeroBasedNarrowOop encoding will work for a heap based at
    // (KlassEncodingMetaspaceMax - class_metaspace_size()).
    } else if (UseCompressedKlassPointers && (mode != HeapBasedNarrowOop) &&
        (Universe::class_metaspace_size() + HeapBaseMinAddress <= KlassEncodingMetaspaceMax) &&
        (KlassEncodingMetaspaceMax + heap_size - Universe::class_metaspace_size() <= OopEncodingHeapMax)) {
      base = (KlassEncodingMetaspaceMax - Universe::class_metaspace_size());
    } else {
      // UnscaledNarrowOop encoding didn't work, no base was found for ZeroBasedNarrowOop,
      // or HeapBasedNarrowOop encoding was requested.  So, can't reserve below 32Gb.
      Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
    }

    // Set narrow_oop_base and narrow_oop_use_implicit_null_checks
    // used in the ReservedHeapSpace() constructors.
    // The final values will be set in initialize_heap() below.
    if ((base != 0) && ((base + heap_size) <= OopEncodingHeapMax) &&
        (!UseCompressedKlassPointers || (base + Universe::class_metaspace_size()) <= KlassEncodingMetaspaceMax)) {
      // Use zero based compressed oops
      Universe::set_narrow_oop_base(NULL);
      // Don't need guard page for implicit checks in indexed
      // addressing mode with zero based Compressed Oops.
      Universe::set_narrow_oop_use_implicit_null_checks(true);
    } else {
      // Set to a non-NULL value so the ReservedSpace ctor computes
      // the correct no-access prefix.
      // The final value will be set in initialize_heap() below.
      Universe::set_narrow_oop_base((address)NarrowOopHeapMax);
#ifdef _WIN64
      if (UseLargePages) {
        // Cannot allocate guard pages for implicit checks in indexed
        // addressing mode when large pages are specified on windows.
        Universe::set_narrow_oop_use_implicit_null_checks(false);
      }
#endif //  _WIN64
    }
  }
#endif
  return (char*)base; // also returns NULL (don't care) for a 32-bit VM
}

jint Universe::initialize_heap() {

  if (UseParallelGC) {
#if INCLUDE_ALL_GCS
    Universe::_collectedHeap = new ParallelScavengeHeap();
#else  // INCLUDE_ALL_GCS
    fatal("UseParallelGC not supported in this VM.");
#endif // INCLUDE_ALL_GCS

  } else if (UseG1GC) {
#if INCLUDE_ALL_GCS
    G1CollectorPolicy* g1p = new G1CollectorPolicy();
    G1CollectedHeap* g1h = new G1CollectedHeap(g1p);
    Universe::_collectedHeap = g1h;
#else  // INCLUDE_ALL_GCS
    fatal("UseG1GC not supported in this VM.");
#endif // INCLUDE_ALL_GCS

  } else {
    GenCollectorPolicy* gc_policy;

    if (UseSerialGC) {
      gc_policy = new MarkSweepPolicy();
    } else if (UseConcMarkSweepGC) {
#if INCLUDE_ALL_GCS
      if (UseAdaptiveSizePolicy) {
        gc_policy = new ASConcurrentMarkSweepPolicy();
      } else {
        gc_policy = new ConcurrentMarkSweepPolicy();
      }
#else  // INCLUDE_ALL_GCS
      fatal("UseConcMarkSweepGC not supported in this VM.");
#endif // INCLUDE_ALL_GCS
    } else { // default old generation
      gc_policy = new MarkSweepPolicy();
    }

    Universe::_collectedHeap = new GenCollectedHeap(gc_policy);
  }

  jint status = Universe::heap()->initialize();
  if (status != JNI_OK) {
    return status;
  }

#ifdef _LP64
  if (UseCompressedOops) {
    // Subtract a page because something can get allocated at heap base.
    // This also makes implicit null checking work, because the
    // memory+1 page below heap_base needs to cause a signal.
    // See needs_explicit_null_check.
    // Only set the heap base for compressed oops because it indicates
    // compressed oops for pstack code.
    bool verbose = PrintCompressedOopsMode || (PrintMiscellaneous && Verbose);
    if (verbose) {
      tty->cr();
      tty->print("heap address: " PTR_FORMAT ", size: " SIZE_FORMAT " MB",
                 Universe::heap()->base(), Universe::heap()->reserved_region().byte_size()/M);
    }
    if (((uint64_t)Universe::heap()->reserved_region().end() > OopEncodingHeapMax) ||
        (UseCompressedKlassPointers &&
        ((uint64_t)Universe::heap()->base() + Universe::class_metaspace_size() > KlassEncodingMetaspaceMax))) {
      // Can't reserve heap below 32Gb.
      // keep the Universe::narrow_oop_base() set in Universe::reserve_heap()
      Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
      if (verbose) {
        tty->print(", Compressed Oops with base: " PTR_FORMAT, Universe::narrow_oop_base());
      }
    } else {
      Universe::set_narrow_oop_base(0);
      if (verbose) {
        tty->print(", zero based Compressed Oops");
      }
#ifdef _WIN64
      if (!Universe::narrow_oop_use_implicit_null_checks()) {
        // Don't need guard page for implicit checks in indexed addressing
        // mode with zero based Compressed Oops.
        Universe::set_narrow_oop_use_implicit_null_checks(true);
      }
#endif //  _WIN64
      if ((uint64_t)Universe::heap()->reserved_region().end() > NarrowOopHeapMax) {
        // Can't reserve heap below 4Gb.
        Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
      } else {
        Universe::set_narrow_oop_shift(0);
        if (verbose) {
          tty->print(", 32-bit Oops");
        }
      }
    }
    if (verbose) {
      tty->cr();
      tty->cr();
    }
    if (UseCompressedKlassPointers) {
      Universe::set_narrow_klass_base(Universe::narrow_oop_base());
      Universe::set_narrow_klass_shift(MIN2(Universe::narrow_oop_shift(), LogKlassAlignmentInBytes));
    }
    Universe::set_narrow_ptrs_base(Universe::narrow_oop_base());
  }
  // Universe::narrow_oop_base() is one page below the metaspace
  // base. The actual metaspace base depends on alignment constraints
  // so we don't know its exact location here.
  assert((intptr_t)Universe::narrow_oop_base() <= (intptr_t)(Universe::heap()->base() - os::vm_page_size() - ClassMetaspaceSize) ||
         Universe::narrow_oop_base() == NULL, "invalid value");
  assert(Universe::narrow_oop_shift() == LogMinObjAlignmentInBytes ||
         Universe::narrow_oop_shift() == 0, "invalid value");
#endif

  // We will never reach the CATCH below since Exceptions::_throw will cause
  // the VM to exit if an exception is thrown during initialization

  if (UseTLAB) {
    assert(Universe::heap()->supports_tlab_allocation(),
           "Should support thread-local allocation buffers");
    ThreadLocalAllocBuffer::startup_initialization();
  }
  return JNI_OK;
}


// Reserve the Java heap, which is now the same for all GCs.
ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
  // Add in the class metaspace area so the classes in the headers can
  // be compressed the same as instances.
  // Need to round the class space size up because it's below the heap and
  // the actual alignment depends on its size.
  Universe::set_class_metaspace_size(align_size_up(ClassMetaspaceSize, alignment));
  size_t total_reserved = align_size_up(heap_size + Universe::class_metaspace_size(), alignment);
  assert(!UseCompressedOops || (total_reserved <= (OopEncodingHeapMax - os::vm_page_size())),
      "heap size is too big for compressed oops");
  char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);

  ReservedHeapSpace total_rs(total_reserved, alignment, UseLargePages, addr);

  if (UseCompressedOops) {
    if (addr != NULL && !total_rs.is_reserved()) {
      // Failed to reserve at the specified address - the requested memory
      // region is taken already, for example, by the 'java' launcher.
      // Try again to reserve the heap higher.
      addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);

      ReservedHeapSpace total_rs0(total_reserved, alignment,
                                  UseLargePages, addr);

      if (addr != NULL && !total_rs0.is_reserved()) {
        // Failed to reserve at the specified address again - give up.
        addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
        assert(addr == NULL, "");

        ReservedHeapSpace total_rs1(total_reserved, alignment,
                                    UseLargePages, addr);
        total_rs = total_rs1;
      } else {
        total_rs = total_rs0;
      }
    }
  }

  if (!total_rs.is_reserved()) {
    vm_exit_during_initialization(err_msg("Could not reserve enough space for object heap (" SIZE_FORMAT " bytes)", total_reserved));
    return total_rs;
  }

  // Split the reserved space into the main Java heap and a space for
  // classes so that they can be compressed using the same algorithm
  // as compressed oops. If compressed oops and compressed klass ptrs are
  // used we need the metaspace first: if the alignment used for
  // compressed oops is greater than the one used for compressed klass
  // ptrs, a metadata space on top of the heap could become
  // unreachable.
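  //
  // Resulting layout of total_rs, from low to high addresses (a sketch;
  // the exact split depends on the alignment computed above):
  //
  //   |<-- class metaspace -->|<------------ Java heap ------------->|
  //   ^ class_rs.base()        ^ heap_rs.base()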
  ReservedSpace class_rs = total_rs.first_part(Universe::class_metaspace_size());
  ReservedSpace heap_rs = total_rs.last_part(Universe::class_metaspace_size(), alignment);
  Metaspace::initialize_class_space(class_rs);

  if (UseCompressedOops) {
    // Universe::initialize_heap() will reset this to NULL if unscaled
    // or zero-based narrow oops are actually used.
    address base = (address)(total_rs.base() - os::vm_page_size());
    Universe::set_narrow_oop_base(base);
  }
  return heap_rs;
}


// It's the caller's responsibility to ensure glitch-freedom
// (if required).
void Universe::update_heap_info_at_gc() {
  _heap_capacity_at_last_gc = heap()->capacity();
  _heap_used_at_last_gc     = heap()->used();
}



void universe2_init() {
  EXCEPTION_MARK;
  Universe::genesis(CATCH);
  // Although we'd like to verify here that the state of the heap
  // is good, we can't because the main thread has not yet added
  // itself to the threads list (so, using current interfaces
  // we can't "fill" its TLAB), unless TLABs are disabled.
  if (VerifyBeforeGC && !UseTLAB &&
      Universe::heap()->total_collections() >= VerifyGCStartAt) {
    Universe::heap()->prepare_for_verify();
    Universe::verify();   // make sure we're starting with a clean slate
  }
}


// This function is defined in JVM.cpp
extern void initialize_converter_functions();

bool universe_post_init() {
  assert(!is_init_completed(), "Error: initialization not yet completed!");
  Universe::_fully_initialized = true;
  EXCEPTION_MARK;
  { ResourceMark rm;
    Interpreter::initialize();      // needed for interpreter entry points
    if (!UseSharedSpaces) {
      HandleMark hm(THREAD);
      KlassHandle ok_h(THREAD, SystemDictionary::Object_klass());
      Universe::reinitialize_vtable_of(ok_h, CHECK_false);
      Universe::reinitialize_itables(CHECK_false);
    }
  }

  HandleMark hm(THREAD);
  Klass* k;
  instanceKlassHandle k_h;
  // Setup preallocated empty java.lang.Class array
  Universe::_the_empty_class_klass_array = oopFactory::new_objArray(SystemDictionary::Class_klass(), 0, CHECK_false);

  // Setup preallocated OutOfMemoryError errors
  k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_OutOfMemoryError(), true, CHECK_false);
  k_h = instanceKlassHandle(THREAD, k);
  Universe::_out_of_memory_error_java_heap = k_h->allocate_instance(CHECK_false);
  Universe::_out_of_memory_error_perm_gen = k_h->allocate_instance(CHECK_false);
  Universe::_out_of_memory_error_array_size = k_h->allocate_instance(CHECK_false);
  Universe::_out_of_memory_error_gc_overhead_limit =
    k_h->allocate_instance(CHECK_false);

  // Setup preallocated NullPointerException
  // (this is currently used for a cheap & dirty solution in compiler exception handling)
  k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_NullPointerException(), true, CHECK_false);
  Universe::_null_ptr_exception_instance = InstanceKlass::cast(k)->allocate_instance(CHECK_false);
  // Setup preallocated ArithmeticException
  // (this is currently used for a cheap & dirty solution in compiler exception handling)
  k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_ArithmeticException(), true, CHECK_false);
  Universe::_arithmetic_exception_instance = InstanceKlass::cast(k)->allocate_instance(CHECK_false);
  // Virtual Machine Error for when we get into a situation we can't resolve
  k = SystemDictionary::resolve_or_fail(
    vmSymbols::java_lang_VirtualMachineError(), true, CHECK_false);
  bool linked = InstanceKlass::cast(k)->link_class_or_fail(CHECK_false);
  if (!linked) {
    tty->print_cr("Unable to link/verify VirtualMachineError class");
    return false; // initialization failed
  }
  Universe::_virtual_machine_error_instance =
    InstanceKlass::cast(k)->allocate_instance(CHECK_false);

  Universe::_vm_exception = InstanceKlass::cast(k)->allocate_instance(CHECK_false);

  if (!DumpSharedSpaces) {
    // These are the only Java fields that are currently set during shared space dumping.
    // We prefer to not handle this generally, so we always reinitialize these detail messages.
    Handle msg = java_lang_String::create_from_str("Java heap space", CHECK_false);
    java_lang_Throwable::set_message(Universe::_out_of_memory_error_java_heap, msg());

    msg = java_lang_String::create_from_str("Metadata space", CHECK_false);
    java_lang_Throwable::set_message(Universe::_out_of_memory_error_perm_gen, msg());

    msg = java_lang_String::create_from_str("Requested array size exceeds VM limit", CHECK_false);
    java_lang_Throwable::set_message(Universe::_out_of_memory_error_array_size, msg());

    msg = java_lang_String::create_from_str("GC overhead limit exceeded", CHECK_false);
    java_lang_Throwable::set_message(Universe::_out_of_memory_error_gc_overhead_limit, msg());

    msg = java_lang_String::create_from_str("/ by zero", CHECK_false);
    java_lang_Throwable::set_message(Universe::_arithmetic_exception_instance, msg());

    // Setup the array of errors that have preallocated backtraces
    k = Universe::_out_of_memory_error_java_heap->klass();
    assert(k->name() == vmSymbols::java_lang_OutOfMemoryError(), "should be out of memory error");
    k_h = instanceKlassHandle(THREAD, k);

    int len = (StackTraceInThrowable) ? (int)PreallocatedOutOfMemoryErrorCount : 0;
    Universe::_preallocated_out_of_memory_error_array = oopFactory::new_objArray(k_h(), len, CHECK_false);
    for (int i = 0; i < len; i++) {
      oop err = k_h->allocate_instance(CHECK_false);
      Handle err_h = Handle(THREAD, err);
      java_lang_Throwable::allocate_backtrace(err_h, CHECK_false);
      Universe::preallocated_out_of_memory_errors()->obj_at_put(i, err_h());
    }
    Universe::_preallocated_out_of_memory_error_avail_count = (jint)len;
  }


  // Setup static method for registering finalizers
  // The finalizer klass must be linked before looking up the method, in
  // case it needs to get rewritten.
  InstanceKlass::cast(SystemDictionary::Finalizer_klass())->link_class(CHECK_false);
  Method* m = InstanceKlass::cast(SystemDictionary::Finalizer_klass())->find_method(
                                  vmSymbols::register_method_name(),
                                  vmSymbols::register_method_signature());
  if (m == NULL || !m->is_static()) {
    THROW_MSG_(vmSymbols::java_lang_NoSuchMethodException(),
      "java.lang.ref.Finalizer.register", false);
  }
  Universe::_finalizer_register_cache->init(
    SystemDictionary::Finalizer_klass(), m, CHECK_false);

  // Resolve on first use and initialize class.
  // Note: no race condition here, since a resolve will always return the same result

  // Setup method for security checks
  k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_reflect_Method(), true, CHECK_false);
  k_h = instanceKlassHandle(THREAD, k);
  k_h->link_class(CHECK_false);
  m = k_h->find_method(vmSymbols::invoke_name(), vmSymbols::object_object_array_object_signature());
  if (m == NULL || m->is_static()) {
    THROW_MSG_(vmSymbols::java_lang_NoSuchMethodException(),
      "java.lang.reflect.Method.invoke", false);
  }
  Universe::_reflect_invoke_cache->init(k_h(), m, CHECK_false);

  // Setup method for registering loaded classes in the class loader vector
  InstanceKlass::cast(SystemDictionary::ClassLoader_klass())->link_class(CHECK_false);
  m = InstanceKlass::cast(SystemDictionary::ClassLoader_klass())->find_method(vmSymbols::addClass_name(), vmSymbols::class_void_signature());
  if (m == NULL || m->is_static()) {
    THROW_MSG_(vmSymbols::java_lang_NoSuchMethodException(),
      "java.lang.ClassLoader.addClass", false);
  }
  Universe::_loader_addClass_cache->init(
    SystemDictionary::ClassLoader_klass(), m, CHECK_false);

  // The following is initializing converter functions for serialization in
  // JVM.cpp. If we clean up the StrictMath code above we may want to find
  // a better solution for this as well.
  initialize_converter_functions();

  // This needs to be done before the first scavenge/gc, since
  // it's an input to soft ref clearing policy.
  {
    MutexLocker x(Heap_lock);
    Universe::update_heap_info_at_gc();
  }

  // ("weak") refs processing infrastructure initialization
  Universe::heap()->post_initialize();

  // Initialize performance counters for metaspaces
  MetaspaceCounters::initialize_performance_counters();

  GC_locker::unlock();  // allow gc after bootstrapping

  MemoryService::set_universe_heap(Universe::_collectedHeap);
  return true;
}


void Universe::compute_base_vtable_size() {
  _base_vtable_size = ClassLoader::compute_Object_vtable();
}


// %%% The Universe::flush_foo methods belong in CodeCache.

// Flushes compiled methods dependent on dependee.
void Universe::flush_dependents_on(instanceKlassHandle dependee) {
  assert_lock_strong(Compile_lock);

  if (CodeCache::number_of_nmethods_with_dependencies() == 0) return;

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  KlassDepChange changes(dependee);

  // Compute the dependent nmethods
  if (CodeCache::mark_for_deoptimization(changes) > 0) {
    // At least one nmethod has been marked for deoptimization
    VM_Deoptimize op;
    VMThread::execute(&op);
  }
}

// Flushes compiled methods dependent on a particular CallSite
// instance when its target is different from the given MethodHandle.
void Universe::flush_dependents_on(Handle call_site, Handle method_handle) {
  assert_lock_strong(Compile_lock);

  if (CodeCache::number_of_nmethods_with_dependencies() == 0) return;

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  CallSiteDepChange changes(call_site(), method_handle());

  // Compute the dependent nmethods that have a reference to a
  // CallSite object.  We use InstanceKlass::mark_dependent_nmethods
  // directly instead of CodeCache::mark_for_deoptimization because we
  // want dependents on the call site class only, not all classes in
  // the ContextStream.
  int marked = 0;
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    InstanceKlass* call_site_klass = InstanceKlass::cast(call_site->klass());
    marked = call_site_klass->mark_dependent_nmethods(changes);
  }
  if (marked > 0) {
    // At least one nmethod has been marked for deoptimization
    VM_Deoptimize op;
    VMThread::execute(&op);
  }
}

#ifdef HOTSWAP
// Flushes compiled methods dependent on dependee in the evolutionary sense
void Universe::flush_evol_dependents_on(instanceKlassHandle ev_k_h) {
  // --- Compile_lock is not held. However we are at a safepoint.
  assert_locked_or_safepoint(Compile_lock);
  if (CodeCache::number_of_nmethods_with_dependencies() == 0) return;

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  // Compute the dependent nmethods
  if (CodeCache::mark_for_evol_deoptimization(ev_k_h) > 0) {
    // At least one nmethod has been marked for deoptimization

    // All this already happens inside a VM_Operation, so we'll do all the work here.
    // Stuff copied from VM_Deoptimize and modified slightly.

    // We do not want any GCs to happen while we are in the middle of this VM operation
    ResourceMark rm;
    DeoptimizationMarker dm;

    // Deoptimize all activations depending on marked nmethods
    Deoptimization::deoptimize_dependents();

    // Make the dependent methods not entrant (in VM_Deoptimize they are made zombies)
    CodeCache::make_marked_nmethods_not_entrant();
  }
}
#endif // HOTSWAP


// Flushes compiled methods dependent on dependee
void Universe::flush_dependents_on_method(methodHandle m_h) {
  // --- Compile_lock is not held. However we are at a safepoint.
  assert_locked_or_safepoint(Compile_lock);

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  // Compute the dependent nmethods
  if (CodeCache::mark_for_deoptimization(m_h()) > 0) {
    // At least one nmethod has been marked for deoptimization

    // All this already happens inside a VM_Operation, so we'll do all the work here.
    // Stuff copied from VM_Deoptimize and modified slightly.

    // We do not want any GCs to happen while we are in the middle of this VM operation
    ResourceMark rm;
    DeoptimizationMarker dm;

    // Deoptimize all activations depending on marked nmethods
    Deoptimization::deoptimize_dependents();

    // Make the dependent methods not entrant (in VM_Deoptimize they are made zombies)
    CodeCache::make_marked_nmethods_not_entrant();
  }
}

void Universe::print() {
  print_on(gclog_or_tty);
}

void Universe::print_on(outputStream* st, bool extended) {
  st->print_cr("Heap");
  if (!extended) {
    heap()->print_on(st);
  } else {
    heap()->print_extended_on(st);
  }
}

void Universe::print_heap_at_SIGBREAK() {
  if (PrintHeapAtSIGBREAK) {
    MutexLocker hl(Heap_lock);
    print_on(tty);
    tty->cr();
    tty->flush();
  }
}

void Universe::print_heap_before_gc(outputStream* st, bool ignore_extended) {
  st->print_cr("{Heap before GC invocations=%u (full %u):",
               heap()->total_collections(),
               heap()->total_full_collections());
  if (!PrintHeapAtGCExtended || ignore_extended) {
    heap()->print_on(st);
  } else {
    heap()->print_extended_on(st);
  }
}

void Universe::print_heap_after_gc(outputStream* st, bool ignore_extended) {
  st->print_cr("Heap after GC invocations=%u (full %u):",
               heap()->total_collections(),
               heap()->total_full_collections());
  if (!PrintHeapAtGCExtended || ignore_extended) {
    heap()->print_on(st);
  } else {
    heap()->print_extended_on(st);
  }
  st->print_cr("}");
}
1281
1282void Universe::verify(bool silent, VerifyOption option) {
1283  // The use of _verify_in_progress is a temporary work around for
1284  // 6320749.  Don't bother with a creating a class to set and clear
1285  // it since it is only used in this method and the control flow is
1286  // straight forward.
  _verify_in_progress = true;

  COMPILER2_PRESENT(
    assert(!DerivedPointerTable::is_active(),
         "DPT should not be active during verification "
         "(of thread stacks below)");
  )

  ResourceMark rm;
  HandleMark hm;  // Handles created during verification can be zapped
  _verify_count++;

  if (!silent) gclog_or_tty->print("[Verifying ");
  if (!silent) gclog_or_tty->print("threads ");
  Threads::verify();
  heap()->verify(silent, option);

  if (!silent) gclog_or_tty->print("syms ");
  SymbolTable::verify();
  if (!silent) gclog_or_tty->print("strs ");
  StringTable::verify();
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    if (!silent) gclog_or_tty->print("zone ");
    CodeCache::verify();
  }
  if (!silent) gclog_or_tty->print("dict ");
  SystemDictionary::verify();
#ifndef PRODUCT
  if (!silent) gclog_or_tty->print("cldg ");
  ClassLoaderDataGraph::verify();
#endif
  if (!silent) gclog_or_tty->print("metaspace chunks ");
  MetaspaceAux::verify_free_chunks();
  if (!silent) gclog_or_tty->print("hand ");
  JNIHandles::verify();
  if (!silent) gclog_or_tty->print("C-heap ");
  os::check_heap();
  if (!silent) gclog_or_tty->print("code cache ");
  CodeCache::verify_oops();
  if (!silent) gclog_or_tty->print_cr("]");

  _verify_in_progress = false;
}

// Oop verification (see MacroAssembler::verify_oop)

static uintptr_t _verify_oop_data[2]   = {0, (uintptr_t)-1};
static uintptr_t _verify_klass_data[2] = {0, (uintptr_t)-1};


#ifndef PRODUCT

static void calculate_verify_data(uintptr_t verify_data[2],
                                  HeapWord* low_boundary,
                                  HeapWord* high_boundary) {
  assert(low_boundary < high_boundary, "bad interval");

  // decide which low-order bits we require to be clear:
  size_t alignSize = MinObjAlignmentInBytes;
  size_t min_object_size = CollectedHeap::min_fill_size();

  // make an inclusive limit:
  uintptr_t max = (uintptr_t)high_boundary - min_object_size*wordSize;
  uintptr_t min = (uintptr_t)low_boundary;
  assert(min < max, "bad interval");
  uintptr_t diff = max ^ min;

  // throw away enough low-order bits to make the diff vanish
  uintptr_t mask = (uintptr_t)(-1);
  while ((mask & diff) != 0)
    mask <<= 1;
  uintptr_t bits = (min & mask);
  assert(bits == (max & mask), "correct mask");
  // check an intermediate value between min and max, just to make sure:
  assert(bits == ((min + (max-min)/2) & mask), "correct mask");

  // require address alignment, too:
  mask |= (alignSize - 1);
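
  // Worked example with hypothetical 32-bit values: for min = 0x10000000 and
  // max = 0x1fffffff, diff = 0x0fffffff, so the loop shifts mask up to
  // 0xf0000000 and bits = (min & mask) = 0x10000000.  With 8-byte object
  // alignment the final mask becomes 0xf0000007, and a pointer p passes the
  // plausibility test exactly when (p & mask) == bits, i.e. it lies in the
  // boundary range and is properly aligned.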

  if (!(verify_data[0] == 0 && verify_data[1] == (uintptr_t)-1)) {
    assert(verify_data[0] == mask && verify_data[1] == bits, "mask stability");
  }
  verify_data[0] = mask;
  verify_data[1] = bits;
}

// Oop verification (see MacroAssembler::verify_oop)

uintptr_t Universe::verify_oop_mask() {
  MemRegion m = heap()->reserved_region();
  calculate_verify_data(_verify_oop_data,
                        m.start(),
                        m.end());
  return _verify_oop_data[0];
}



uintptr_t Universe::verify_oop_bits() {
  verify_oop_mask();
  return _verify_oop_data[1];
}
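
// A minimal sketch of how the mask/bits pair is consumed (the real consumer
// is MacroAssembler::verify_oop, which performs the equivalent test in
// generated assembly; this C++ helper is illustrative only):
//
//   static bool looks_like_oop(uintptr_t p) {
//     return (p & Universe::verify_oop_mask()) == Universe::verify_oop_bits();
//   }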

uintptr_t Universe::verify_mark_mask() {
  return markOopDesc::lock_mask_in_place;
}

uintptr_t Universe::verify_mark_bits() {
  intptr_t mask = verify_mark_mask();
  intptr_t bits = (intptr_t)markOopDesc::prototype();
  assert((bits & ~mask) == 0, "no stray header bits");
  return bits;
}
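
// Presumably the analogous plausibility test for an object's header is
// (mark & verify_mark_mask()) == verify_mark_bits(), which accepts only
// mark words whose lock bits match the unlocked prototype.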
#endif // PRODUCT


void Universe::compute_verify_oop_data() {
  verify_oop_mask();
  verify_oop_bits();
  verify_mark_mask();
  verify_mark_bits();
}


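// The caches below record a (klass, method idnum) pair rather than a raw
// Method* so that a cached entry survives class redefinition: the idnum is
// stable across redefinitions while the Method* may be replaced
// (LatestMethodOopCache::get_Method re-resolves through the idnum for
// exactly that reason).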
void CommonMethodOopCache::init(Klass* k, Method* m, TRAPS) {
  if (!UseSharedSpaces) {
    _klass = k;
  }
#ifndef PRODUCT
  else {
    // sharing initialization should have already set up _klass
    assert(_klass != NULL, "just checking");
  }
#endif

  _method_idnum = m->method_idnum();
  assert(_method_idnum >= 0, "sanity check");
}


ActiveMethodOopsCache::~ActiveMethodOopsCache() {
  if (_prev_methods != NULL) {
    delete _prev_methods;
    _prev_methods = NULL;
  }
}


void ActiveMethodOopsCache::add_previous_version(Method* const method) {
  assert(Thread::current()->is_VM_thread(),
    "only VMThread can add previous versions");

  // Only append the previous method if it is executing on the stack.
  if (method->on_stack()) {

    if (_prev_methods == NULL) {
      // This is the first previous version so make some space.
      // Start with 2 elements under the assumption that the class
      // won't be redefined much.
      _prev_methods = new (ResourceObj::C_HEAP, mtClass) GrowableArray<Method*>(2, true);
    }

    // RC_TRACE macro has an embedded ResourceMark
    RC_TRACE(0x00000100,
      ("add: %s(%s): adding prev version ref for cached method @%d",
      method->name()->as_C_string(), method->signature()->as_C_string(),
      _prev_methods->length()));

    _prev_methods->append(method);
  }


  // Since the caller is the VMThread and we are at a safepoint, this is a good
  // time to clear out unused method references.

  if (_prev_methods == NULL) return;

  for (int i = _prev_methods->length() - 1; i >= 0; i--) {
    Method* method = _prev_methods->at(i);
    assert(method != NULL, "weak method ref was unexpectedly cleared");

    if (!method->on_stack()) {
      // This method isn't running anymore so remove it
      _prev_methods->remove_at(i);
      MetadataFactory::free_metadata(method->method_holder()->class_loader_data(), method);
    } else {
      // RC_TRACE macro has an embedded ResourceMark
      RC_TRACE(0x00000400, ("add: %s(%s): previous cached method @%d is alive",
        method->name()->as_C_string(), method->signature()->as_C_string(), i));
    }
  }
} // end add_previous_version()


bool ActiveMethodOopsCache::is_same_method(Method* const method) const {
  InstanceKlass* ik = InstanceKlass::cast(klass());
  Method* check_method = ik->method_with_idnum(method_idnum());
  assert(check_method != NULL, "sanity check");
  if (check_method == method) {
    // done with the easy case
    return true;
  }

  if (_prev_methods != NULL) {
    // The cached method has been redefined at least once so search
    // the previous versions for a match.
    for (int i = 0; i < _prev_methods->length(); i++) {
      check_method = _prev_methods->at(i);
      if (check_method == method) {
        // a previous version matches
        return true;
      }
    }
  }

  // either no previous versions or no previous version matched
  return false;
}


Method* LatestMethodOopCache::get_Method() {
  InstanceKlass* ik = InstanceKlass::cast(klass());
  Method* m = ik->method_with_idnum(method_idnum());
  assert(m != NULL, "sanity check");
  return m;
}


#ifdef ASSERT
// Release dummy object(s) at bottom of heap
bool Universe::release_fullgc_alot_dummy() {
  MutexLocker ml(FullGCALot_lock);
  if (_fullgc_alot_dummy_array != NULL) {
    if (_fullgc_alot_dummy_next >= _fullgc_alot_dummy_array->length()) {
      // No more dummies to release, release entire array instead
      _fullgc_alot_dummy_array = NULL;
      return false;
    }
    if (!UseConcMarkSweepGC) {
      // Release dummy at bottom of old generation
      _fullgc_alot_dummy_array->obj_at_put(_fullgc_alot_dummy_next++, NULL);
    }
    // Release dummy at bottom of permanent generation
    _fullgc_alot_dummy_array->obj_at_put(_fullgc_alot_dummy_next++, NULL);
  }
  return true;
}

#endif // ASSERT