universe.cpp revision 5976:2b8e28fdf503
/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/dependencies.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genRemSet.hpp"
#include "memory/generation.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/oopFactory.hpp"
#include "memory/space.hpp"
#include "memory/universe.hpp"
#include "memory/universe.inline.hpp"
#include "oops/constantPool.hpp"
#include "oops/instanceClassLoaderKlass.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayKlass.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/timer.hpp"
#include "runtime/vm_operations.hpp"
#include "services/memoryService.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"
#include "utilities/hashtable.inline.hpp"
#include "utilities/preserveException.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#endif // INCLUDE_ALL_GCS

// Known objects
Klass* Universe::_boolArrayKlassObj = NULL;
Klass* Universe::_byteArrayKlassObj = NULL;
Klass* Universe::_charArrayKlassObj = NULL;
Klass* Universe::_intArrayKlassObj = NULL;
Klass* Universe::_shortArrayKlassObj = NULL;
Klass* Universe::_longArrayKlassObj = NULL;
Klass* Universe::_singleArrayKlassObj = NULL;
Klass* Universe::_doubleArrayKlassObj = NULL;
Klass* Universe::_typeArrayKlassObjs[T_VOID+1] = { NULL /*, NULL...*/ };
Klass* Universe::_objectArrayKlassObj = NULL;
oop Universe::_int_mirror = NULL;
oop Universe::_float_mirror = NULL;
oop Universe::_double_mirror = NULL;
oop Universe::_byte_mirror = NULL;
oop Universe::_bool_mirror = NULL;
oop Universe::_char_mirror = NULL;
oop Universe::_long_mirror = NULL;
oop Universe::_short_mirror = NULL;
oop Universe::_void_mirror = NULL;
oop Universe::_mirrors[T_VOID+1] = { NULL /*, NULL...*/ };
oop Universe::_main_thread_group = NULL;
oop Universe::_system_thread_group = NULL;
objArrayOop Universe::_the_empty_class_klass_array = NULL;
Array<Klass*>* Universe::_the_array_interfaces_array = NULL;
oop Universe::_the_null_string = NULL;
oop Universe::_the_min_jint_string = NULL;
LatestMethodCache* Universe::_finalizer_register_cache = NULL;
LatestMethodCache* Universe::_loader_addClass_cache = NULL;
LatestMethodCache* Universe::_pd_implies_cache = NULL;
oop Universe::_out_of_memory_error_java_heap = NULL;
oop Universe::_out_of_memory_error_metaspace = NULL;
oop Universe::_out_of_memory_error_class_metaspace = NULL;
oop Universe::_out_of_memory_error_array_size = NULL;
oop Universe::_out_of_memory_error_gc_overhead_limit = NULL;
objArrayOop Universe::_preallocated_out_of_memory_error_array = NULL;
volatile jint Universe::_preallocated_out_of_memory_error_avail_count = 0;
bool Universe::_verify_in_progress = false;
oop Universe::_null_ptr_exception_instance = NULL;
oop Universe::_arithmetic_exception_instance = NULL;
oop Universe::_virtual_machine_error_instance = NULL;
oop Universe::_vm_exception = NULL;
Array<int>* Universe::_the_empty_int_array = NULL;
Array<u2>* Universe::_the_empty_short_array = NULL;
Array<Klass*>* Universe::_the_empty_klass_array = NULL;
Array<Method*>* Universe::_the_empty_method_array = NULL;

// These variables are guarded by FullGCALot_lock.
debug_only(objArrayOop Universe::_fullgc_alot_dummy_array = NULL;)
debug_only(int Universe::_fullgc_alot_dummy_next = 0;)

// Heap
int Universe::_verify_count = 0;

int Universe::_base_vtable_size = 0;
bool Universe::_bootstrapping = false;
bool Universe::_fully_initialized = false;

size_t Universe::_heap_capacity_at_last_gc;
size_t Universe::_heap_used_at_last_gc = 0;

CollectedHeap* Universe::_collectedHeap = NULL;

NarrowPtrStruct Universe::_narrow_oop = { NULL, 0, true };
NarrowPtrStruct Universe::_narrow_klass = { NULL, 0, true };
address Universe::_narrow_ptrs_base;

void Universe::basic_type_classes_do(void f(Klass*)) {
  f(boolArrayKlassObj());
  f(byteArrayKlassObj());
  f(charArrayKlassObj());
  f(intArrayKlassObj());
  f(shortArrayKlassObj());
  f(longArrayKlassObj());
  f(singleArrayKlassObj());
  f(doubleArrayKlassObj());
}

void Universe::oops_do(OopClosure* f, bool do_all) {

  f->do_oop((oop*) &_int_mirror);
  f->do_oop((oop*) &_float_mirror);
  f->do_oop((oop*) &_double_mirror);
  f->do_oop((oop*) &_byte_mirror);
  f->do_oop((oop*) &_bool_mirror);
  f->do_oop((oop*) &_char_mirror);
  f->do_oop((oop*) &_long_mirror);
  f->do_oop((oop*) &_short_mirror);
  f->do_oop((oop*) &_void_mirror);

  for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
    f->do_oop((oop*) &_mirrors[i]);
  }
  assert(_mirrors[0] == NULL && _mirrors[T_BOOLEAN - 1] == NULL, "checking");

  f->do_oop((oop*)&_the_empty_class_klass_array);
  f->do_oop((oop*)&_the_null_string);
  f->do_oop((oop*)&_the_min_jint_string);
  f->do_oop((oop*)&_out_of_memory_error_java_heap);
  f->do_oop((oop*)&_out_of_memory_error_metaspace);
  f->do_oop((oop*)&_out_of_memory_error_class_metaspace);
  f->do_oop((oop*)&_out_of_memory_error_array_size);
  f->do_oop((oop*)&_out_of_memory_error_gc_overhead_limit);
  f->do_oop((oop*)&_preallocated_out_of_memory_error_array);
  f->do_oop((oop*)&_null_ptr_exception_instance);
  f->do_oop((oop*)&_arithmetic_exception_instance);
  f->do_oop((oop*)&_virtual_machine_error_instance);
  f->do_oop((oop*)&_main_thread_group);
  f->do_oop((oop*)&_system_thread_group);
  f->do_oop((oop*)&_vm_exception);
  debug_only(f->do_oop((oop*)&_fullgc_alot_dummy_array);)
}

// Serialize metadata in and out of CDS archive, not oops.
void Universe::serialize(SerializeClosure* f, bool do_all) {

  f->do_ptr((void**)&_boolArrayKlassObj);
  f->do_ptr((void**)&_byteArrayKlassObj);
  f->do_ptr((void**)&_charArrayKlassObj);
  f->do_ptr((void**)&_intArrayKlassObj);
  f->do_ptr((void**)&_shortArrayKlassObj);
  f->do_ptr((void**)&_longArrayKlassObj);
  f->do_ptr((void**)&_singleArrayKlassObj);
  f->do_ptr((void**)&_doubleArrayKlassObj);
  f->do_ptr((void**)&_objectArrayKlassObj);

  {
    for (int i = 0; i < T_VOID+1; i++) {
      if (_typeArrayKlassObjs[i] != NULL) {
        assert(i >= T_BOOLEAN, "checking");
        f->do_ptr((void**)&_typeArrayKlassObjs[i]);
      } else if (do_all) {
        f->do_ptr((void**)&_typeArrayKlassObjs[i]);
      }
    }
  }

  f->do_ptr((void**)&_the_array_interfaces_array);
  f->do_ptr((void**)&_the_empty_int_array);
  f->do_ptr((void**)&_the_empty_short_array);
  f->do_ptr((void**)&_the_empty_method_array);
  f->do_ptr((void**)&_the_empty_klass_array);
  _finalizer_register_cache->serialize(f);
  _loader_addClass_cache->serialize(f);
  _pd_implies_cache->serialize(f);
}

void Universe::check_alignment(uintx size, uintx alignment, const char* name) {
  if (size < alignment || size % alignment != 0) {
    vm_exit_during_initialization(
      err_msg("Size of %s (" UINTX_FORMAT " bytes) must be aligned to " UINTX_FORMAT " bytes", name, size, alignment));
  }
}

void initialize_basic_type_klass(Klass* k, TRAPS) {
  Klass* ok = SystemDictionary::Object_klass();
  if (UseSharedSpaces) {
    assert(k->super() == ok, "u3");
    k->restore_unshareable_info(CHECK);
  } else {
    k->initialize_supers(ok, CHECK);
  }
  k->append_to_sibling_list();
}

void Universe::genesis(TRAPS) {
  ResourceMark rm;

  { FlagSetting fs(_bootstrapping, true);

    { MutexLocker mc(Compile_lock);

      // determine base vtable size; without that we cannot create the array klasses
      compute_base_vtable_size();

      if (!UseSharedSpaces) {
        _boolArrayKlassObj = TypeArrayKlass::create_klass(T_BOOLEAN, sizeof(jboolean), CHECK);
        _charArrayKlassObj = TypeArrayKlass::create_klass(T_CHAR, sizeof(jchar), CHECK);
        _singleArrayKlassObj = TypeArrayKlass::create_klass(T_FLOAT, sizeof(jfloat), CHECK);
        _doubleArrayKlassObj = TypeArrayKlass::create_klass(T_DOUBLE, sizeof(jdouble), CHECK);
        _byteArrayKlassObj = TypeArrayKlass::create_klass(T_BYTE, sizeof(jbyte), CHECK);
        _shortArrayKlassObj = TypeArrayKlass::create_klass(T_SHORT, sizeof(jshort), CHECK);
        _intArrayKlassObj = TypeArrayKlass::create_klass(T_INT, sizeof(jint), CHECK);
        _longArrayKlassObj = TypeArrayKlass::create_klass(T_LONG, sizeof(jlong), CHECK);

        _typeArrayKlassObjs[T_BOOLEAN] = _boolArrayKlassObj;
        _typeArrayKlassObjs[T_CHAR] = _charArrayKlassObj;
        _typeArrayKlassObjs[T_FLOAT] = _singleArrayKlassObj;
        _typeArrayKlassObjs[T_DOUBLE] = _doubleArrayKlassObj;
        _typeArrayKlassObjs[T_BYTE] = _byteArrayKlassObj;
        _typeArrayKlassObjs[T_SHORT] = _shortArrayKlassObj;
        _typeArrayKlassObjs[T_INT] = _intArrayKlassObj;
        _typeArrayKlassObjs[T_LONG] = _longArrayKlassObj;

        ClassLoaderData* null_cld = ClassLoaderData::the_null_class_loader_data();

        _the_array_interfaces_array = MetadataFactory::new_array<Klass*>(null_cld, 2, NULL, CHECK);
        _the_empty_int_array = MetadataFactory::new_array<int>(null_cld, 0, CHECK);
        _the_empty_short_array = MetadataFactory::new_array<u2>(null_cld, 0, CHECK);
        _the_empty_method_array = MetadataFactory::new_array<Method*>(null_cld, 0, CHECK);
        _the_empty_klass_array = MetadataFactory::new_array<Klass*>(null_cld, 0, CHECK);
      }
    }

    vmSymbols::initialize(CHECK);

    SystemDictionary::initialize(CHECK);

    Klass* ok = SystemDictionary::Object_klass();

    _the_null_string = StringTable::intern("null", CHECK);
    _the_min_jint_string = StringTable::intern("-2147483648", CHECK);

    if (UseSharedSpaces) {
      // Verify shared interfaces array.
      assert(_the_array_interfaces_array->at(0) ==
             SystemDictionary::Cloneable_klass(), "u3");
      assert(_the_array_interfaces_array->at(1) ==
             SystemDictionary::Serializable_klass(), "u3");
    } else {
      // Set up shared interfaces array. (Do this before supers are set up.)
      _the_array_interfaces_array->at_put(0, SystemDictionary::Cloneable_klass());
      _the_array_interfaces_array->at_put(1, SystemDictionary::Serializable_klass());
    }

    initialize_basic_type_klass(boolArrayKlassObj(), CHECK);
    initialize_basic_type_klass(charArrayKlassObj(), CHECK);
    initialize_basic_type_klass(singleArrayKlassObj(), CHECK);
    initialize_basic_type_klass(doubleArrayKlassObj(), CHECK);
    initialize_basic_type_klass(byteArrayKlassObj(), CHECK);
    initialize_basic_type_klass(shortArrayKlassObj(), CHECK);
    initialize_basic_type_klass(intArrayKlassObj(), CHECK);
    initialize_basic_type_klass(longArrayKlassObj(), CHECK);
  } // end of core bootstrapping

  // Maybe this could be lifted up now that object array can be initialized
  // during the bootstrapping.

  // OLD
  // Initialize _objectArrayKlass after core bootstrapping to make
  // sure the super class is set up properly for _objectArrayKlass.
  // ---
  // NEW
  // Since some of the old system object arrays have been converted to
  // ordinary object arrays, _objectArrayKlass will be loaded when
  // SystemDictionary::initialize(CHECK); is run. See the extra check
  // for Object_klass_loaded in objArrayKlassKlass::allocate_objArray_klass_impl.
  _objectArrayKlassObj = InstanceKlass::
    cast(SystemDictionary::Object_klass())->array_klass(1, CHECK);
  // OLD
  // Add the class to the class hierarchy manually to make sure that
  // its vtable is initialized after core bootstrapping is completed.
  // ---
  // New
  // Have already been initialized.
  _objectArrayKlassObj->append_to_sibling_list();

  // Compute is_jdk version flags.
  // Only 1.3 or later has the java.lang.Shutdown class.
  // Only 1.4 or later has the java.lang.CharSequence interface.
  // Only 1.5 or later has the java.lang.management.MemoryUsage class.
  if (JDK_Version::is_partially_initialized()) {
    uint8_t jdk_version;
    Klass* k = SystemDictionary::resolve_or_null(
        vmSymbols::java_lang_management_MemoryUsage(), THREAD);
    CLEAR_PENDING_EXCEPTION; // ignore exceptions
    if (k == NULL) {
      k = SystemDictionary::resolve_or_null(
          vmSymbols::java_lang_CharSequence(), THREAD);
      CLEAR_PENDING_EXCEPTION; // ignore exceptions
      if (k == NULL) {
        k = SystemDictionary::resolve_or_null(
            vmSymbols::java_lang_Shutdown(), THREAD);
        CLEAR_PENDING_EXCEPTION; // ignore exceptions
        if (k == NULL) {
          jdk_version = 2;
        } else {
          jdk_version = 3;
        }
      } else {
        jdk_version = 4;
      }
    } else {
      jdk_version = 5;
    }
    JDK_Version::fully_initialize(jdk_version);
  }

  #ifdef ASSERT
  if (FullGCALot) {
    // Allocate an array of dummy objects.
    // We'd like these to be at the bottom of the old generation,
    // so that when we free one and then collect,
    // (almost) the whole heap moves
    // and we find out if we actually update all the oops correctly.
    // But we can't allocate directly in the old generation,
    // so we allocate wherever, and hope that the first collection
    // moves these objects to the bottom of the old generation.
    // We can allocate directly in the permanent generation, so we do.
    int size;
    if (UseConcMarkSweepGC) {
      warning("Using +FullGCALot with concurrent mark sweep gc "
              "will not force all objects to relocate");
      size = FullGCALotDummies;
    } else {
      size = FullGCALotDummies * 2;
    }
    objArrayOop naked_array = oopFactory::new_objArray(SystemDictionary::Object_klass(), size, CHECK);
    objArrayHandle dummy_array(THREAD, naked_array);
    int i = 0;
    while (i < size) {
      // Allocate dummy in old generation
      oop dummy = InstanceKlass::cast(SystemDictionary::Object_klass())->allocate_instance(CHECK);
      dummy_array->obj_at_put(i++, dummy);
    }
    {
      // Only modify the global variable inside the mutex.
      // If we had a race to here, the other dummy_array instances
      // and their elements just get dropped on the floor, which is fine.
      MutexLocker ml(FullGCALot_lock);
      if (_fullgc_alot_dummy_array == NULL) {
        _fullgc_alot_dummy_array = dummy_array();
      }
    }
    assert(i == _fullgc_alot_dummy_array->length(), "just checking");
  }
  #endif

  // Initialize dependency array for null class loader
  ClassLoaderData::the_null_class_loader_data()->init_dependencies(CHECK);

}

// CDS support for patching vtables in metadata in the shared archive.
// All types inherited from Metadata have vtables, but not types inherited
// from MetaspaceObj, because the latter does not have virtual functions.
// If the metadata type has a vtable, it cannot be shared in the read-only
// section of the CDS archive, because the vtable pointer is patched.
static inline void add_vtable(void** list, int* n, void* o, int count) {
  guarantee((*n) < count, "vtable list too small");
  void* vtable = dereference_vptr(o);
  assert(*(void**)(vtable) != NULL, "invalid vtable");
  list[(*n)++] = vtable;
}

void Universe::init_self_patching_vtbl_list(void** list, int count) {
  int n = 0;
  { InstanceKlass o; add_vtable(list, &n, &o, count); }
  { InstanceClassLoaderKlass o; add_vtable(list, &n, &o, count); }
  { InstanceMirrorKlass o; add_vtable(list, &n, &o, count); }
  { InstanceRefKlass o; add_vtable(list, &n, &o, count); }
  { TypeArrayKlass o; add_vtable(list, &n, &o, count); }
  { ObjArrayKlass o; add_vtable(list, &n, &o, count); }
  { Method o; add_vtable(list, &n, &o, count); }
  { ConstantPool o; add_vtable(list, &n, &o, count); }
}

void Universe::initialize_basic_type_mirrors(TRAPS) {
  assert(_int_mirror==NULL, "basic type mirrors already initialized");
  _int_mirror =
    java_lang_Class::create_basic_type_mirror("int", T_INT, CHECK);
  _float_mirror =
    java_lang_Class::create_basic_type_mirror("float", T_FLOAT, CHECK);
  _double_mirror =
    java_lang_Class::create_basic_type_mirror("double", T_DOUBLE, CHECK);
  _byte_mirror =
    java_lang_Class::create_basic_type_mirror("byte", T_BYTE, CHECK);
  _bool_mirror =
    java_lang_Class::create_basic_type_mirror("boolean", T_BOOLEAN, CHECK);
  _char_mirror =
    java_lang_Class::create_basic_type_mirror("char", T_CHAR, CHECK);
  _long_mirror =
    java_lang_Class::create_basic_type_mirror("long", T_LONG, CHECK);
  _short_mirror =
    java_lang_Class::create_basic_type_mirror("short", T_SHORT, CHECK);
  _void_mirror =
    java_lang_Class::create_basic_type_mirror("void", T_VOID, CHECK);

  _mirrors[T_INT] = _int_mirror;
  _mirrors[T_FLOAT] = _float_mirror;
  _mirrors[T_DOUBLE] = _double_mirror;
  _mirrors[T_BYTE] = _byte_mirror;
  _mirrors[T_BOOLEAN] = _bool_mirror;
  _mirrors[T_CHAR] = _char_mirror;
  _mirrors[T_LONG] = _long_mirror;
  _mirrors[T_SHORT] = _short_mirror;
  _mirrors[T_VOID] = _void_mirror;
  //_mirrors[T_OBJECT] = InstanceKlass::cast(_object_klass)->java_mirror();
  //_mirrors[T_ARRAY] = InstanceKlass::cast(_object_klass)->java_mirror();
}

void Universe::fixup_mirrors(TRAPS) {
  // Bootstrap problem: all classes get a mirror (java.lang.Class instance) assigned eagerly,
  // but we cannot do that for classes created before java.lang.Class is loaded. Here we simply
  // walk over permanent objects created so far (mostly classes) and fixup their mirrors. Note
  // that the number of objects allocated at this point is very small.
  assert(SystemDictionary::Class_klass_loaded(), "java.lang.Class should be loaded");
  HandleMark hm(THREAD);
  // Cache the start of the static fields
  InstanceMirrorKlass::init_offset_of_static_fields();

  GrowableArray <Klass*>* list = java_lang_Class::fixup_mirror_list();
  int list_length = list->length();
  for (int i = 0; i < list_length; i++) {
    Klass* k = list->at(i);
    assert(k->is_klass(), "List should only hold classes");
    EXCEPTION_MARK;
    KlassHandle kh(THREAD, k);
    java_lang_Class::fixup_mirror(kh, CATCH);
  }
  delete java_lang_Class::fixup_mirror_list();
  java_lang_Class::set_fixup_mirror_list(NULL);
}

static bool has_run_finalizers_on_exit = false;

void Universe::run_finalizers_on_exit() {
  if (has_run_finalizers_on_exit) return;
  has_run_finalizers_on_exit = true;

  // Called on VM exit. This ought to be run in a separate thread.
  if (TraceReferenceGC) tty->print_cr("Callback to run finalizers on exit");
  {
    PRESERVE_EXCEPTION_MARK;
    KlassHandle finalizer_klass(THREAD, SystemDictionary::Finalizer_klass());
    JavaValue result(T_VOID);
    JavaCalls::call_static(
      &result,
      finalizer_klass,
      vmSymbols::run_finalizers_on_exit_name(),
      vmSymbols::void_method_signature(),
      THREAD
    );
    // Ignore any pending exceptions
    CLEAR_PENDING_EXCEPTION;
  }
}


// initialize_vtable could cause gc if
// 1) we specified true to initialize_vtable and
// 2) this ran after gc was enabled
// In case those ever change we use handles for oops
void Universe::reinitialize_vtable_of(KlassHandle k_h, TRAPS) {
  // init vtable of k and all subclasses
  Klass* ko = k_h();
  klassVtable* vt = ko->vtable();
  if (vt) vt->initialize_vtable(false, CHECK);
  if (ko->oop_is_instance()) {
    InstanceKlass* ik = (InstanceKlass*)ko;
    for (KlassHandle s_h(THREAD, ik->subklass());
         s_h() != NULL;
         s_h = KlassHandle(THREAD, s_h()->next_sibling())) {
      reinitialize_vtable_of(s_h, CHECK);
    }
  }
}


void initialize_itable_for_klass(Klass* k, TRAPS) {
  InstanceKlass::cast(k)->itable()->initialize_itable(false, CHECK);
}


void Universe::reinitialize_itables(TRAPS) {
  SystemDictionary::classes_do(initialize_itable_for_klass, CHECK);

}


bool Universe::on_page_boundary(void* addr) {
  return ((uintptr_t) addr) % os::vm_page_size() == 0;
}


bool Universe::should_fill_in_stack_trace(Handle throwable) {
  // never attempt to fill in the stack trace of preallocated errors that do not have
  // backtrace. These errors are kept alive forever and may be "re-used" when all
  // preallocated errors with backtrace have been consumed. Also need to avoid
  // a potential loop which could happen if an out of memory occurs when attempting
  // to allocate the backtrace.
  return ((throwable() != Universe::_out_of_memory_error_java_heap) &&
          (throwable() != Universe::_out_of_memory_error_metaspace) &&
          (throwable() != Universe::_out_of_memory_error_class_metaspace) &&
          (throwable() != Universe::_out_of_memory_error_array_size) &&
          (throwable() != Universe::_out_of_memory_error_gc_overhead_limit));
}


oop Universe::gen_out_of_memory_error(oop default_err) {
  // generate an out of memory error:
  // - if there is a preallocated error with backtrace available then return it with
  //   a filled in stack trace.
  // - if there are no preallocated errors with backtrace available then return
  //   an error without backtrace.
  int next;
  if (_preallocated_out_of_memory_error_avail_count > 0) {
    next = (int)Atomic::add(-1, &_preallocated_out_of_memory_error_avail_count);
    assert(next < (int)PreallocatedOutOfMemoryErrorCount, "avail count is corrupt");
  } else {
    next = -1;
  }
  if (next < 0) {
    // all preallocated errors have been used.
    // return default
    return default_err;
  } else {
    // get the error object at the slot and set it to NULL so that the
    // array isn't keeping it alive anymore.
    oop exc = preallocated_out_of_memory_errors()->obj_at(next);
    assert(exc != NULL, "slot has been used already");
    preallocated_out_of_memory_errors()->obj_at_put(next, NULL);

    // use the message from the default error
    oop msg = java_lang_Throwable::message(default_err);
    assert(msg != NULL, "no message");
    java_lang_Throwable::set_message(exc, msg);

    // populate the stack trace and return it.
    java_lang_Throwable::fill_in_stack_trace_of_preallocated_backtrace(exc);
    return exc;
  }
}

intptr_t Universe::_non_oop_bits = 0;

void* Universe::non_oop_word() {
  // Neither the high bits nor the low bits of this value is allowed
  // to look like (respectively) the high or low bits of a real oop.
  //
  // High and low are CPU-specific notions, but low always includes
  // the low-order bit. Since oops are always aligned at least mod 4,
  // setting the low-order bit will ensure that the low half of the
  // word will never look like that of a real oop.
  //
  // Using the OS-supplied non-memory-address word (usually 0 or -1)
  // will take care of the high bits, however many there are.

  if (_non_oop_bits == 0) {
    _non_oop_bits = (intptr_t)os::non_memory_address_word() | 1;
  }

  return (void*)_non_oop_bits;
}

jint universe_init() {
  assert(!Universe::_fully_initialized, "called after initialize_vtables");
  guarantee(1 << LogHeapWordSize == sizeof(HeapWord),
            "LogHeapWordSize is incorrect.");
  guarantee(sizeof(oop) >= sizeof(HeapWord), "HeapWord larger than oop?");
  guarantee(sizeof(oop) % sizeof(HeapWord) == 0,
            "oop size is not a multiple of HeapWord size");
  TraceTime timer("Genesis", TraceStartupTime);
  GC_locker::lock(); // do not allow gc during bootstrapping
  JavaClasses::compute_hard_coded_offsets();

  jint status = Universe::initialize_heap();
  if (status != JNI_OK) {
    return status;
  }

  Metaspace::global_initialize();

  // Create memory for metadata. Must be after initializing heap for
  // DumpSharedSpaces.
  ClassLoaderData::init_null_class_loader_data();

  // We have a heap so create the Method* caches before
  // Metaspace::initialize_shared_spaces() tries to populate them.
  Universe::_finalizer_register_cache = new LatestMethodCache();
  Universe::_loader_addClass_cache = new LatestMethodCache();
  Universe::_pd_implies_cache = new LatestMethodCache();

  if (UseSharedSpaces) {
    // Read the data structures supporting the shared spaces (shared
    // system dictionary, symbol table, etc.). After that, access to
    // the file (other than the mapped regions) is no longer needed, and
    // the file is closed. Closing the file does not affect the
    // currently mapped regions.
    MetaspaceShared::initialize_shared_spaces();
    StringTable::create_table();
  } else {
    SymbolTable::create_table();
    StringTable::create_table();
    ClassLoader::create_package_info_table();
  }

  return JNI_OK;
}

// Choose the heap base address and oop encoding mode
// when compressed oops are used:
// Unscaled  - Use 32-bits oops without encoding when
//             NarrowOopHeapBaseMin + heap_size < 4Gb
// ZeroBased - Use zero based compressed oops with encoding when
//             NarrowOopHeapBaseMin + heap_size < 32Gb
// HeapBased - Use compressed oops with heap base + encoding.

// 4Gb
static const uint64_t UnscaledOopHeapMax = (uint64_t(max_juint) + 1);
// 32Gb
// OopEncodingHeapMax == UnscaledOopHeapMax << LogMinObjAlignmentInBytes;

char* Universe::preferred_heap_base(size_t heap_size, size_t alignment, NARROW_OOP_MODE mode) {
  assert(is_size_aligned((size_t)OopEncodingHeapMax, alignment), "Must be");
  assert(is_size_aligned((size_t)UnscaledOopHeapMax, alignment), "Must be");
  assert(is_size_aligned(heap_size, alignment), "Must be");

  uintx heap_base_min_address_aligned = align_size_up(HeapBaseMinAddress, alignment);

  size_t base = 0;
#ifdef _LP64
  if (UseCompressedOops) {
    assert(mode == UnscaledNarrowOop ||
           mode == ZeroBasedNarrowOop ||
           mode == HeapBasedNarrowOop, "mode is invalid");
    const size_t total_size = heap_size + heap_base_min_address_aligned;
    // Return specified base for the first request.
    if (!FLAG_IS_DEFAULT(HeapBaseMinAddress) && (mode == UnscaledNarrowOop)) {
      base = heap_base_min_address_aligned;

    // If the total size is small enough to allow UnscaledNarrowOop then
    // just use UnscaledNarrowOop.
    } else if ((total_size <= OopEncodingHeapMax) && (mode != HeapBasedNarrowOop)) {
      if ((total_size <= UnscaledOopHeapMax) && (mode == UnscaledNarrowOop) &&
          (Universe::narrow_oop_shift() == 0)) {
        // Use 32-bits oops without encoding and
        // place heap's top on the 4Gb boundary
        base = (UnscaledOopHeapMax - heap_size);
      } else {
        // Can't reserve with NarrowOopShift == 0
        Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);

        if (mode == UnscaledNarrowOop ||
            mode == ZeroBasedNarrowOop && total_size <= UnscaledOopHeapMax) {

          // Use zero based compressed oops with encoding and
          // place heap's top on the 32Gb boundary in case
          // total_size > 4Gb or failed to reserve below 4Gb.
          uint64_t heap_top = OopEncodingHeapMax;

          // For small heaps, save some space for compressed class pointer
          // space so it can be decoded with no base.
          if (UseCompressedClassPointers && !UseSharedSpaces &&
              OopEncodingHeapMax <= 32*G) {

            uint64_t class_space = align_size_up(CompressedClassSpaceSize, alignment);
            assert(is_size_aligned((size_t)OopEncodingHeapMax-class_space,
                   alignment), "difference must be aligned too");
            uint64_t new_top = OopEncodingHeapMax-class_space;

            if (total_size <= new_top) {
              heap_top = new_top;
            }
          }

          // Align base to the adjusted top of the heap
          base = heap_top - heap_size;
        }
      }
    } else {
      // UnscaledNarrowOop encoding didn't work, and no base was found for ZeroBasedOops or
      // HeapBasedNarrowOop encoding was requested. So, can't reserve below 32Gb.
      Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
    }

    // Set narrow_oop_base and narrow_oop_use_implicit_null_checks
    // used in ReservedHeapSpace() constructors.
    // The final values will be set in initialize_heap() below.
    if ((base != 0) && ((base + heap_size) <= OopEncodingHeapMax)) {
      // Use zero based compressed oops
      Universe::set_narrow_oop_base(NULL);
      // Don't need guard page for implicit checks in indexed
      // addressing mode with zero based Compressed Oops.
      Universe::set_narrow_oop_use_implicit_null_checks(true);
    } else {
      // Set to a non-NULL value so the ReservedSpace ctor computes
      // the correct no-access prefix.
      // The final value will be set in initialize_heap() below.
      Universe::set_narrow_oop_base((address)UnscaledOopHeapMax);
#if defined(_WIN64) || defined(AIX)
      if (UseLargePages) {
        // Cannot allocate guard pages for implicit checks in indexed
        // addressing mode when large pages are specified on windows.
        Universe::set_narrow_oop_use_implicit_null_checks(false);
      }
#endif // _WIN64
    }
  }
#endif

  assert(is_ptr_aligned((char*)base, alignment), "Must be");
  return (char*)base; // also return NULL (don't care) for 32-bit VM
}

jint Universe::initialize_heap() {

  if (UseParallelGC) {
#if INCLUDE_ALL_GCS
    Universe::_collectedHeap = new ParallelScavengeHeap();
#else // INCLUDE_ALL_GCS
    fatal("UseParallelGC not supported in this VM.");
#endif // INCLUDE_ALL_GCS

  } else if (UseG1GC) {
#if INCLUDE_ALL_GCS
    G1CollectorPolicy* g1p = new G1CollectorPolicy();
    G1CollectedHeap* g1h = new G1CollectedHeap(g1p);
    Universe::_collectedHeap = g1h;
#else // INCLUDE_ALL_GCS
    fatal("UseG1GC not supported in java kernel vm.");
#endif // INCLUDE_ALL_GCS

  } else {
    GenCollectorPolicy *gc_policy;

    if (UseSerialGC) {
      gc_policy = new MarkSweepPolicy();
    } else if (UseConcMarkSweepGC) {
#if INCLUDE_ALL_GCS
      if (UseAdaptiveSizePolicy) {
        gc_policy = new ASConcurrentMarkSweepPolicy();
      } else {
        gc_policy = new ConcurrentMarkSweepPolicy();
      }
#else // INCLUDE_ALL_GCS
      fatal("UseConcMarkSweepGC not supported in this VM.");
#endif // INCLUDE_ALL_GCS
    } else { // default old generation
      gc_policy = new MarkSweepPolicy();
    }

    Universe::_collectedHeap = new GenCollectedHeap(gc_policy);
  }

  jint status = Universe::heap()->initialize();
  if (status != JNI_OK) {
    return status;
  }

#ifdef _LP64
  if (UseCompressedOops) {
    // Subtract a page because something can get allocated at heap base.
    // This also makes implicit null checking work, because the
    // memory+1 page below heap_base needs to cause a signal.
    // See needs_explicit_null_check.
    // Only set the heap base for compressed oops because it indicates
    // compressed oops for pstack code.
    bool verbose = PrintCompressedOopsMode || (PrintMiscellaneous && Verbose);
    if (verbose) {
      tty->cr();
      tty->print("heap address: " PTR_FORMAT ", size: " SIZE_FORMAT " MB",
                 Universe::heap()->base(), Universe::heap()->reserved_region().byte_size()/M);
    }
    if (((uint64_t)Universe::heap()->reserved_region().end() > OopEncodingHeapMax)) {
      // Can't reserve heap below 32Gb.
      // keep the Universe::narrow_oop_base() set in Universe::reserve_heap()
      Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
#ifdef AIX
      // There is no protected page before the heap. This assures all oops
      // are decoded so that NULL is preserved, so this page will not be accessed.
      Universe::set_narrow_oop_use_implicit_null_checks(false);
#endif
      if (verbose) {
        tty->print(", %s: "PTR_FORMAT,
                   narrow_oop_mode_to_string(HeapBasedNarrowOop),
                   Universe::narrow_oop_base());
      }
    } else {
      Universe::set_narrow_oop_base(0);
      if (verbose) {
        tty->print(", %s", narrow_oop_mode_to_string(ZeroBasedNarrowOop));
      }
#ifdef _WIN64
      if (!Universe::narrow_oop_use_implicit_null_checks()) {
        // Don't need guard page for implicit checks in indexed addressing
        // mode with zero based Compressed Oops.
        Universe::set_narrow_oop_use_implicit_null_checks(true);
      }
#endif // _WIN64
      if((uint64_t)Universe::heap()->reserved_region().end() > UnscaledOopHeapMax) {
        // Can't reserve heap below 4Gb.
        Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
      } else {
        Universe::set_narrow_oop_shift(0);
        if (verbose) {
          tty->print(", %s", narrow_oop_mode_to_string(UnscaledNarrowOop));
        }
      }
    }

    if (verbose) {
      tty->cr();
      tty->cr();
    }
    Universe::set_narrow_ptrs_base(Universe::narrow_oop_base());
  }
  // Universe::narrow_oop_base() is one page below the heap.
  assert((intptr_t)Universe::narrow_oop_base() <= (intptr_t)(Universe::heap()->base() -
         os::vm_page_size()) ||
         Universe::narrow_oop_base() == NULL, "invalid value");
  assert(Universe::narrow_oop_shift() == LogMinObjAlignmentInBytes ||
         Universe::narrow_oop_shift() == 0, "invalid value");
#endif

  // We will never reach the CATCH below since Exceptions::_throw will cause
  // the VM to exit if an exception is thrown during initialization

  if (UseTLAB) {
    assert(Universe::heap()->supports_tlab_allocation(),
           "Should support thread-local allocation buffers");
    ThreadLocalAllocBuffer::startup_initialization();
  }
  return JNI_OK;
}


// Reserve the Java heap, which is now the same for all GCs.
ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
  assert(alignment <= Arguments::conservative_max_heap_alignment(),
         err_msg("actual alignment "SIZE_FORMAT" must be within maximum heap alignment "SIZE_FORMAT,
                 alignment, Arguments::conservative_max_heap_alignment()));
  size_t total_reserved = align_size_up(heap_size, alignment);
  assert(!UseCompressedOops || (total_reserved <= (OopEncodingHeapMax - os::vm_page_size())),
         "heap size is too big for compressed oops");

  bool use_large_pages = UseLargePages && is_size_aligned(alignment, os::large_page_size());
  assert(!UseLargePages
         || UseParallelGC
         || use_large_pages, "Wrong alignment to use large pages");

  char* addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::UnscaledNarrowOop);

  ReservedHeapSpace total_rs(total_reserved, alignment, use_large_pages, addr);

  if (UseCompressedOops) {
    if (addr != NULL && !total_rs.is_reserved()) {
      // Failed to reserve at specified address - the requested memory
      // region is taken already, for example, by 'java' launcher.
      // Try again to reserve heap higher.
      addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::ZeroBasedNarrowOop);

      ReservedHeapSpace total_rs0(total_reserved, alignment,
                                  use_large_pages, addr);

      if (addr != NULL && !total_rs0.is_reserved()) {
        // Failed to reserve at specified address again - give up.
        addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::HeapBasedNarrowOop);
        assert(addr == NULL, "");

        ReservedHeapSpace total_rs1(total_reserved, alignment,
                                    use_large_pages, addr);
        total_rs = total_rs1;
      } else {
        total_rs = total_rs0;
      }
    }
  }

  if (!total_rs.is_reserved()) {
    vm_exit_during_initialization(err_msg("Could not reserve enough space for " SIZE_FORMAT "KB object heap", total_reserved/K));
    return total_rs;
  }

  if (UseCompressedOops) {
    // Universe::initialize_heap() will reset this to NULL if unscaled
    // or zero-based narrow oops are actually used.
    address base = (address)(total_rs.base() - os::vm_page_size());
    Universe::set_narrow_oop_base(base);
  }
  return total_rs;
}


// It's the caller's responsibility to ensure glitch-freedom
// (if required).
void Universe::update_heap_info_at_gc() {
  _heap_capacity_at_last_gc = heap()->capacity();
  _heap_used_at_last_gc = heap()->used();
}


const char* Universe::narrow_oop_mode_to_string(Universe::NARROW_OOP_MODE mode) {
  switch (mode) {
    case UnscaledNarrowOop:
      return "32-bits Oops";
    case ZeroBasedNarrowOop:
      return "zero based Compressed Oops";
    case HeapBasedNarrowOop:
      return "Compressed Oops with base";
  }

  ShouldNotReachHere();
  return "";
}


Universe::NARROW_OOP_MODE Universe::narrow_oop_mode() {
  if (narrow_oop_base() != 0) {
    return HeapBasedNarrowOop;
  }

  if (narrow_oop_shift() != 0) {
    return ZeroBasedNarrowOop;
  }

  return UnscaledNarrowOop;
}


void universe2_init() {
  EXCEPTION_MARK;
  Universe::genesis(CATCH);
}


// This function is defined in JVM.cpp
extern void initialize_converter_functions();

bool universe_post_init() {
  assert(!is_init_completed(), "Error: initialization not yet completed!");
  Universe::_fully_initialized = true;
  EXCEPTION_MARK;
  { ResourceMark rm;
    Interpreter::initialize(); // needed for interpreter entry points
    if (!UseSharedSpaces) {
      HandleMark hm(THREAD);
      KlassHandle ok_h(THREAD, SystemDictionary::Object_klass());
      Universe::reinitialize_vtable_of(ok_h, CHECK_false);
      Universe::reinitialize_itables(CHECK_false);
    }
  }

  HandleMark hm(THREAD);
  Klass* k;
  instanceKlassHandle k_h;
  // Setup preallocated empty java.lang.Class array
  Universe::_the_empty_class_klass_array = oopFactory::new_objArray(SystemDictionary::Class_klass(), 0, CHECK_false);

  // Setup preallocated OutOfMemoryError errors
  k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_OutOfMemoryError(), true, CHECK_false);
  k_h = instanceKlassHandle(THREAD, k);
  Universe::_out_of_memory_error_java_heap = k_h->allocate_instance(CHECK_false);
  Universe::_out_of_memory_error_metaspace = k_h->allocate_instance(CHECK_false);
  Universe::_out_of_memory_error_class_metaspace = k_h->allocate_instance(CHECK_false);
  Universe::_out_of_memory_error_array_size = k_h->allocate_instance(CHECK_false);
  Universe::_out_of_memory_error_gc_overhead_limit =
    k_h->allocate_instance(CHECK_false);

  // Setup preallocated NullPointerException
  // (this is currently used for a cheap & dirty solution in compiler exception handling)
  k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_NullPointerException(), true, CHECK_false);
  Universe::_null_ptr_exception_instance = InstanceKlass::cast(k)->allocate_instance(CHECK_false);
  // Setup preallocated ArithmeticException
  // (this is currently used for a cheap & dirty solution in compiler exception handling)
  k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_ArithmeticException(), true, CHECK_false);
  Universe::_arithmetic_exception_instance = InstanceKlass::cast(k)->allocate_instance(CHECK_false);
  // Virtual Machine Error for when we get into a situation we can't resolve
  k = SystemDictionary::resolve_or_fail(
      vmSymbols::java_lang_VirtualMachineError(), true, CHECK_false);
  bool linked = InstanceKlass::cast(k)->link_class_or_fail(CHECK_false);
  if (!linked) {
    tty->print_cr("Unable to link/verify VirtualMachineError class");
    return false; // initialization failed
  }
  Universe::_virtual_machine_error_instance =
    InstanceKlass::cast(k)->allocate_instance(CHECK_false);

  Universe::_vm_exception = InstanceKlass::cast(k)->allocate_instance(CHECK_false);

  if (!DumpSharedSpaces) {
    // These are the only Java fields that are currently set during shared space dumping.
    // We prefer to not handle this generally, so we always reinitialize these detail messages.
    Handle msg = java_lang_String::create_from_str("Java heap space", CHECK_false);
    java_lang_Throwable::set_message(Universe::_out_of_memory_error_java_heap, msg());

    msg = java_lang_String::create_from_str("Metaspace", CHECK_false);
    java_lang_Throwable::set_message(Universe::_out_of_memory_error_metaspace, msg());
    msg = java_lang_String::create_from_str("Compressed class space", CHECK_false);
    java_lang_Throwable::set_message(Universe::_out_of_memory_error_class_metaspace, msg());

    msg = java_lang_String::create_from_str("Requested array size exceeds VM limit", CHECK_false);
    java_lang_Throwable::set_message(Universe::_out_of_memory_error_array_size, msg());

    msg = java_lang_String::create_from_str("GC overhead limit exceeded", CHECK_false);
    java_lang_Throwable::set_message(Universe::_out_of_memory_error_gc_overhead_limit, msg());

    msg = java_lang_String::create_from_str("/ by zero", CHECK_false);
    java_lang_Throwable::set_message(Universe::_arithmetic_exception_instance, msg());

    // Setup the array of errors that have preallocated backtrace
    k = Universe::_out_of_memory_error_java_heap->klass();
    assert(k->name() == vmSymbols::java_lang_OutOfMemoryError(), "should be out of memory error");
    k_h = instanceKlassHandle(THREAD, k);

    int len = (StackTraceInThrowable) ? (int)PreallocatedOutOfMemoryErrorCount : 0;
    Universe::_preallocated_out_of_memory_error_array = oopFactory::new_objArray(k_h(), len, CHECK_false);
    for (int i=0; i<len; i++) {
      oop err = k_h->allocate_instance(CHECK_false);
      Handle err_h = Handle(THREAD, err);
      java_lang_Throwable::allocate_backtrace(err_h, CHECK_false);
      Universe::preallocated_out_of_memory_errors()->obj_at_put(i, err_h());
    }
    Universe::_preallocated_out_of_memory_error_avail_count = (jint)len;
  }


  // Setup static method for registering finalizers
  // The finalizer klass must be linked before looking up the method, in
  // case it needs to get rewritten.
  InstanceKlass::cast(SystemDictionary::Finalizer_klass())->link_class(CHECK_false);
  Method* m = InstanceKlass::cast(SystemDictionary::Finalizer_klass())->find_method(
                                  vmSymbols::register_method_name(),
                                  vmSymbols::register_method_signature());
  if (m == NULL || !m->is_static()) {
    tty->print_cr("Unable to link/verify Finalizer.register method");
    return false; // initialization failed (cannot throw exception yet)
  }
  Universe::_finalizer_register_cache->init(
    SystemDictionary::Finalizer_klass(), m);

  // Setup method for registering loaded classes in class loader vector
  InstanceKlass::cast(SystemDictionary::ClassLoader_klass())->link_class(CHECK_false);
  m = InstanceKlass::cast(SystemDictionary::ClassLoader_klass())->find_method(vmSymbols::addClass_name(), vmSymbols::class_void_signature());
  if (m == NULL || m->is_static()) {
    tty->print_cr("Unable to link/verify ClassLoader.addClass method");
    return false; // initialization failed (cannot throw exception yet)
  }
  Universe::_loader_addClass_cache->init(
    SystemDictionary::ClassLoader_klass(), m);

  // Setup method for checking protection domain
  InstanceKlass::cast(SystemDictionary::ProtectionDomain_klass())->link_class(CHECK_false);
  m = InstanceKlass::cast(SystemDictionary::ProtectionDomain_klass())->
        find_method(vmSymbols::impliesCreateAccessControlContext_name(),
                    vmSymbols::void_boolean_signature());
  // Allow NULL which should only happen with bootstrapping.
  if (m != NULL) {
    if (m->is_static()) {
      // NoSuchMethodException doesn't actually work because it tries to run the
      // <init> function before java_lang_Class is linked. Print error and exit.
      tty->print_cr("ProtectionDomain.impliesCreateAccessControlContext() has the wrong linkage");
      return false; // initialization failed
    }
    Universe::_pd_implies_cache->init(
      SystemDictionary::ProtectionDomain_klass(), m);
  }

  // The following is initializing converter functions for serialization in
  // JVM.cpp. If we clean up the StrictMath code above we may want to find
  // a better solution for this as well.
  initialize_converter_functions();

  // This needs to be done before the first scavenge/gc, since
  // it's an input to soft ref clearing policy.
  {
    MutexLocker x(Heap_lock);
    Universe::update_heap_info_at_gc();
  }

  // ("weak") refs processing infrastructure initialization
  Universe::heap()->post_initialize();

  // Initialize performance counters for metaspaces
  MetaspaceCounters::initialize_performance_counters();
  CompressedClassSpaceCounters::initialize_performance_counters();

  MemoryService::add_metaspace_memory_pools();

  GC_locker::unlock(); // allow gc after bootstrapping

  MemoryService::set_universe_heap(Universe::_collectedHeap);
  return true;
}


void Universe::compute_base_vtable_size() {
  _base_vtable_size = ClassLoader::compute_Object_vtable();
}


// %%% The Universe::flush_foo methods belong in CodeCache.

// Flushes compiled methods dependent on dependee.
void Universe::flush_dependents_on(instanceKlassHandle dependee) {
  assert_lock_strong(Compile_lock);

  if (CodeCache::number_of_nmethods_with_dependencies() == 0) return;

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  KlassDepChange changes(dependee);

  // Compute the dependent nmethods
  if (CodeCache::mark_for_deoptimization(changes) > 0) {
    // At least one nmethod has been marked for deoptimization
    VM_Deoptimize op;
    VMThread::execute(&op);
  }
}

// Flushes compiled methods dependent on a particular CallSite
// instance when its target is different than the given MethodHandle.
void Universe::flush_dependents_on(Handle call_site, Handle method_handle) {
  assert_lock_strong(Compile_lock);

  if (CodeCache::number_of_nmethods_with_dependencies() == 0) return;

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  CallSiteDepChange changes(call_site(), method_handle());

  // Compute the dependent nmethods that have a reference to a
  // CallSite object. We use InstanceKlass::mark_dependent_nmethod
  // directly instead of CodeCache::mark_for_deoptimization because we
  // want dependents on the call site class only not all classes in
  // the ContextStream.
  int marked = 0;
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    InstanceKlass* call_site_klass = InstanceKlass::cast(call_site->klass());
    marked = call_site_klass->mark_dependent_nmethods(changes);
  }
  if (marked > 0) {
    // At least one nmethod has been marked for deoptimization
    VM_Deoptimize op;
    VMThread::execute(&op);
  }
}

#ifdef HOTSWAP
// Flushes compiled methods dependent on dependee in the evolutionary sense
void Universe::flush_evol_dependents_on(instanceKlassHandle ev_k_h) {
  // --- Compile_lock is not held. However we are at a safepoint.
  assert_locked_or_safepoint(Compile_lock);
  if (CodeCache::number_of_nmethods_with_dependencies() == 0) return;

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  // Compute the dependent nmethods
  if (CodeCache::mark_for_evol_deoptimization(ev_k_h) > 0) {
    // At least one nmethod has been marked for deoptimization

    // All this already happens inside a VM_Operation, so we'll do all the work here.
    // Stuff copied from VM_Deoptimize and modified slightly.

    // We do not want any GCs to happen while we are in the middle of this VM operation
    ResourceMark rm;
    DeoptimizationMarker dm;

    // Deoptimize all activations depending on marked nmethods
    Deoptimization::deoptimize_dependents();

    // Make the dependent methods not entrant (in VM_Deoptimize they are made zombies)
    CodeCache::make_marked_nmethods_not_entrant();
  }
}
#endif // HOTSWAP


// Flushes compiled methods dependent on dependee
void Universe::flush_dependents_on_method(methodHandle m_h) {
  // --- Compile_lock is not held. However we are at a safepoint.
  assert_locked_or_safepoint(Compile_lock);

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  // Compute the dependent nmethods
  if (CodeCache::mark_for_deoptimization(m_h()) > 0) {
    // At least one nmethod has been marked for deoptimization

    // All this already happens inside a VM_Operation, so we'll do all the work here.
    // Stuff copied from VM_Deoptimize and modified slightly.

    // We do not want any GCs to happen while we are in the middle of this VM operation
    ResourceMark rm;
    DeoptimizationMarker dm;

    // Deoptimize all activations depending on marked nmethods
    Deoptimization::deoptimize_dependents();

    // Make the dependent methods not entrant (in VM_Deoptimize they are made zombies)
    CodeCache::make_marked_nmethods_not_entrant();
  }
}

void Universe::print() {
  print_on(gclog_or_tty);
}

void Universe::print_on(outputStream* st, bool extended) {
  st->print_cr("Heap");
  if (!extended) {
    heap()->print_on(st);
  } else {
    heap()->print_extended_on(st);
  }
}

void Universe::print_heap_at_SIGBREAK() {
  if (PrintHeapAtSIGBREAK) {
    MutexLocker hl(Heap_lock);
    print_on(tty);
    tty->cr();
    tty->flush();
  }
}

void Universe::print_heap_before_gc(outputStream* st, bool ignore_extended) {
  st->print_cr("{Heap before GC invocations=%u (full %u):",
               heap()->total_collections(),
               heap()->total_full_collections());
  if (!PrintHeapAtGCExtended || ignore_extended) {
    heap()->print_on(st);
  } else {
    heap()->print_extended_on(st);
  }
}

void Universe::print_heap_after_gc(outputStream* st, bool ignore_extended) {
  st->print_cr("Heap after GC invocations=%u (full %u):",
               heap()->total_collections(),
               heap()->total_full_collections());
  if (!PrintHeapAtGCExtended || ignore_extended) {
    heap()->print_on(st);
  } else {
    heap()->print_extended_on(st);
  }
  st->print_cr("}");
}

void Universe::verify(VerifyOption option, const char* prefix, bool silent) {
  // The use of _verify_in_progress is a temporary work around for
  // 6320749. Don't bother with creating a class to set and clear
  // it since it is only used in this method and the control flow is
  // straightforward.
  _verify_in_progress = true;

  COMPILER2_PRESENT(
    assert(!DerivedPointerTable::is_active(),
           "DPT should not be active during verification "
           "(of thread stacks below)");
  )

  ResourceMark rm;
  HandleMark hm; // Handles created during verification can be zapped
  _verify_count++;

  if (!silent) gclog_or_tty->print(prefix);
  if (!silent) gclog_or_tty->print("[Verifying ");
  if (!silent) gclog_or_tty->print("threads ");
  Threads::verify();
  if (!silent) gclog_or_tty->print("heap ");
  heap()->verify(silent, option);
  if (!silent) gclog_or_tty->print("syms ");
  SymbolTable::verify();
  if (!silent) gclog_or_tty->print("strs ");
  StringTable::verify();
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    if (!silent) gclog_or_tty->print("zone ");
    CodeCache::verify();
  }
  if (!silent) gclog_or_tty->print("dict ");
  SystemDictionary::verify();
#ifndef PRODUCT
  if (!silent) gclog_or_tty->print("cldg ");
  ClassLoaderDataGraph::verify();
#endif
  if (!silent) gclog_or_tty->print("metaspace chunks ");
  MetaspaceAux::verify_free_chunks();
  if (!silent) gclog_or_tty->print("hand ");
  JNIHandles::verify();
  if (!silent) gclog_or_tty->print("C-heap ");
  os::check_heap();
  if (!silent) gclog_or_tty->print("code cache ");
  CodeCache::verify_oops();
  if (!silent) gclog_or_tty->print_cr("]");

  _verify_in_progress = false;
}

// Oop verification (see MacroAssembler::verify_oop)

static uintptr_t _verify_oop_data[2] = {0, (uintptr_t)-1};
static uintptr_t _verify_klass_data[2] = {0, (uintptr_t)-1};


#ifndef PRODUCT

static void calculate_verify_data(uintptr_t verify_data[2],
                                  HeapWord* low_boundary,
                                  HeapWord* high_boundary) {
  assert(low_boundary < high_boundary, "bad interval");

  // decide which low-order bits we require to be clear:
  size_t alignSize = MinObjAlignmentInBytes;
  size_t min_object_size = CollectedHeap::min_fill_size();

  // make an inclusive limit:
  uintptr_t max = (uintptr_t)high_boundary - min_object_size*wordSize;
  uintptr_t min = (uintptr_t)low_boundary;
  assert(min < max, "bad interval");
  uintptr_t diff = max ^ min;

  // throw away enough low-order bits to make the diff vanish
  uintptr_t mask = (uintptr_t)(-1);
  while ((mask & diff) != 0)
    mask <<= 1;
  uintptr_t bits = (min & mask);
  assert(bits == (max & mask), "correct mask");
  // check an intermediate value between min and max, just to make sure:
  assert(bits == ((min + (max-min)/2) & mask), "correct mask");

  // require address alignment, too:
  mask |= (alignSize - 1);

  if (!(verify_data[0] == 0 && verify_data[1] == (uintptr_t)-1)) {
    assert(verify_data[0] == mask && verify_data[1] == bits, "mask stability");
  }
  verify_data[0] = mask;
  verify_data[1] = bits;
}

// Oop verification (see MacroAssembler::verify_oop)

uintptr_t Universe::verify_oop_mask() {
  MemRegion m = heap()->reserved_region();
  calculate_verify_data(_verify_oop_data,
                        m.start(),
                        m.end());
  return _verify_oop_data[0];
}



uintptr_t Universe::verify_oop_bits() {
  verify_oop_mask();
  return _verify_oop_data[1];
}

uintptr_t Universe::verify_mark_mask() {
  return markOopDesc::lock_mask_in_place;
}

uintptr_t Universe::verify_mark_bits() {
  intptr_t mask = verify_mark_mask();
  intptr_t bits = (intptr_t)markOopDesc::prototype();
  assert((bits & ~mask) == 0, "no stray header bits");
  return bits;
}
#endif // PRODUCT


void Universe::compute_verify_oop_data() {
  verify_oop_mask();
  verify_oop_bits();
  verify_mark_mask();
  verify_mark_bits();
}


void LatestMethodCache::init(Klass* k, Method* m) {
  if (!UseSharedSpaces) {
    _klass = k;
  }
#ifndef PRODUCT
  else {
    // sharing initialization should have already set up _klass
    assert(_klass != NULL, "just checking");
  }
#endif

  _method_idnum = m->method_idnum();
  assert(_method_idnum >= 0, "sanity check");
}


Method* LatestMethodCache::get_method() {
  if (klass() == NULL) return NULL;
  InstanceKlass* ik = InstanceKlass::cast(klass());
  Method* m = ik->method_with_idnum(method_idnum());
  assert(m != NULL, "sanity check");
  return m;
}


#ifdef ASSERT
// Release dummy object(s) at bottom of heap
bool Universe::release_fullgc_alot_dummy() {
  MutexLocker ml(FullGCALot_lock);
  if (_fullgc_alot_dummy_array != NULL) {
    if (_fullgc_alot_dummy_next >= _fullgc_alot_dummy_array->length()) {
      // No more dummies to release, release entire array instead
      _fullgc_alot_dummy_array = NULL;
      return false;
    }
    if (!UseConcMarkSweepGC) {
      // Release dummy at bottom of old generation
      _fullgc_alot_dummy_array->obj_at_put(_fullgc_alot_dummy_next++, NULL);
    }
    // Release dummy at bottom of permanent generation
    _fullgc_alot_dummy_array->obj_at_put(_fullgc_alot_dummy_next++, NULL);
  }
  return true;
}

#endif // ASSERT